Commit 2a4abf20 authored by Brice Convers

Report added

parent d898e423
Files added: images/img_5.jpg (1.91 MiB), images/img_6.jpg (1.38 MiB), and three further files.

# Image Analysis Project

Numerous RGB cameras in the commercial sector employ Color Filter Array (CFA) technology. This technology involves an array of red, green, or blue filters placed atop the sensor, usually organized in periodic patterns. The incident light is therefore filtered by each filter before being captured by the sensor. The typical acquisition process uses a predefined CFA pattern to allocate a color to each pixel of the sensor. The goal of this project is to demosaic such CFA images. To reconstruct them, a forward operator is provided that models the effect of a CFA camera with two different CFA patterns, Bayer or Quad Bayer; this operation is implemented in src/forward_model.py.
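
For example, simulating a CFA acquisition with the forward operator boils down to the following minimal sketch (the image filename and the inline normalization are placeholders; the `CFA` class and its `direct` method come from src/forward_model.py):

```Python
from skimage.io import imread
from src.forward_model import CFA

img = imread("images/img_1.png") / 255.0  # any RGB image from images/, normalized to [0, 1]
op = CFA("bayer", img.shape)              # or "quad_bayer"
y = op.direct(img)                        # simulated mosaic seen through the CFA
print(img.shape, "->", y.shape)           # (H, W, 3) -> (H, W)
```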
You can check the report called **Image_Analysis_Project_Report_Brice_Convers** to find more information about it.

## Architecture

```bash
sicom_image_analysis_project/
├─.gitignore # Git's ignore file
├─images/ # All images you need to reconstruct
├─main.ipynb # A notebook to experiment with the code
├─output/ # The output folder where you can save your reconstruction
├─README.md # Readme, contains all information about the project
├─readme_imgs/ # Images for the Readme
├─requirements.txt # Requirement file for packages installation
└─src/                  # All of the source files for the project
   ├─checks.py          # File containing some sanity checks
   ├─forward_model.py   # File containing the CFA operator
   ├─utils.py           # Some utilities
   └─methods/
      ├─baseline/       # Example of reconstruction
      └─template/       # Template of your project (to be copied)
```

## How to use

To use the code project you can move **main_template.py** outside the **src** folder and execute it. Or you can also call the **run_reconstruction** method in your program.
```Python
import src.methods.brice_convers.dataHandler as DataHandler
import src.methods.brice_convers.dataEvaluation as DataEvaluation
import time

WORKING_DIRECTORY_PATH = "SICOM_Image_Analysis/sicom_image_analysis_project/"

DataHandler = DataHandler.DataHandler(WORKING_DIRECTORY_PATH)
DataEvaluation = DataEvaluation.DataEvaluation(DataHandler)

def main(DataHandler):
    IMAGE_PATH = WORKING_DIRECTORY_PATH + "images/"
    CFA_NAME = "quad_bayer"
    METHOD = "menon"

    startTime = time.time()

    DataHandler.list_images(IMAGE_PATH)
    DataHandler.print_list_images()

    DataHandler.compute_CFA_images(CFA_NAME)
    DataHandler.compute_reconstruction_images(METHOD, {"cfa": CFA_NAME})

    # The first argument (here: 3) is the image index in the list printed by "DataHandler.print_list_images()"
    DataHandler.plot_reconstructed_image(3, METHOD, {"cfa": CFA_NAME}, zoomSize="large")

    DataEvaluation.print_metrics(3, METHOD)

    endTime = time.time()
    print("[INFO] Elapsed time: " + str(endTime - startTime) + "s")
    print("[INFO] End")

if __name__ == "__main__":
    main(DataHandler)
```
## TODO List:

- Fix the Menon method for the Quad Bayer pattern with landscape pictures
## References:

[1] [*Research Paper:*](https://ieeexplore.ieee.org/document/4032820) Used for the Menon method.
## Authors

- [Brice Convers](https://briceconvers.com)
src/methods/brice_convers/dataEvaluation.py

```Python
from src.methods.brice_convers.dataHandler import DataHandler
from src.utils import psnr, ssim
from sklearn.metrics import f1_score, mean_squared_error
import numpy as np

class DataEvaluation:
    # ...

    def print_metrics(self, indexImage, method):
        # ...
        ssimMetric = ssim(img, res)
        psnrMetric = psnr(img, res)

        mse = mean_squared_error(img.flatten(), res.flatten())
        mseRedPixels = mean_squared_error(img[:, :, 0], res[:, :, 0])
        mseGreenPixels = mean_squared_error(img[:, :, 1], res[:, :, 1])
        mseBluePixels = mean_squared_error(img[:, :, 2], res[:, :, 2])

        miMetric = DataEvaluation.MI(img, res)
        ccMetric = DataEvaluation.CC(img, res)
        sadMetric = DataEvaluation.SAD(img, res)
        lsMetric = DataEvaluation.LS(img, res)

        print("[INFO] Metrics for image {}".format(indexImage))
        print("#" * 30)
        print(" SSIM: {:.6} ".format(ssimMetric))
        print(" PSNR: {:.6} ".format(psnrMetric))
        print(" MSE : {:.3e} ".format(mse))
        print(" MSE (R): {:.3e} ".format(mseRedPixels))
        print(" MSE (G): {:.3e} ".format(mseGreenPixels))
        print(" MSE (B): {:.3e} ".format(mseBluePixels))
        print(" MI: {:.6} ".format(miMetric))
        print(" CC: {:.6} ".format(ccMetric))
        print(" SAD: {:.6} ".format(sadMetric))
        print(" LS: {:.3e} ".format(lsMetric))
        print("#" * 30)

    # Mutual Information
    def MI(img_mov, img_ref):
        # ...
```
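The bodies of `MI`, `CC`, `SAD`, and `LS` are elided in this diff. For reference, a standard histogram-based mutual information estimate between two images could look like this sketch (not necessarily the implementation used here):

```Python
import numpy as np

def mutual_information(img_mov, img_ref, bins=256):
    # Joint histogram of intensities, normalized into a joint distribution
    hist_2d, _, _ = np.histogram2d(img_mov.ravel(), img_ref.ravel(), bins=bins)
    pxy = hist_2d / hist_2d.sum()
    px = pxy.sum(axis=1)               # marginal distribution of img_mov
    py = pxy.sum(axis=0)               # marginal distribution of img_ref
    px_py = px[:, None] * py[None, :]  # product of the marginals
    nz = pxy > 0                       # only sum where the joint probability is non-zero
    return np.sum(pxy[nz] * np.log(pxy[nz] / px_py[nz]))
```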
src/methods/brice_convers/dataHandler.py

```Python
# ...
from src.methods.brice_convers.reconstruct import run_reconstruction as run_reconstruction_brice_convers
from src.methods.brice_convers.utilities import folderExists
from src.utils import normalise_image, save_image, psnr, ssim
from skimage.io import imread
import numpy as np

class DataHandler:
    def __init__(self, workingDirectoryPath = ""):
        DataHandler.imagePaths = []
        DataHandler.imagePathsLabels = []
        DataHandler.CFA_images = {}
        DataHandler.reconstruction_images = {"interpolation": {}, "menon": {}}
        DataHandler.IMAGE_TYPES = (".jpg", ".jpeg", ".png", ".bmp", ".tif", ".tiff")
        DataHandler.WD_PATH = workingDirectoryPath

    # ... (inside list_files)
            if sum >= counter and (sum <= lastFile) or lastFile == -1:
                # construct the path to the image and yield it
                imagePath = os.path.join(rootDir, filename)
                imageName = Path(imagePath).stem
                DataHandler.imagePaths.append(imagePath)
                DataHandler.imagePathsLabels.append(imageName)
                sum += 1

    # ... (inside list_images)
        DataHandler.list_files(self, basePath, interval, validExts=DataHandler.IMAGE_TYPES, contains=contains)
        print("[INFO] There are {} images.".format(len(DataHandler.imagePaths)))

    def print_list_images(self):
        print("[INFO] This is the order of your images in the list. indexImage is the index in this table:")
        print(DataHandler.imagePathsLabels)

    def plot_input_images(self, rows=3, cols=3, figsize=(15, 5), max_images = 6, title="Input Images"):
        fig = plt.figure(figsize=figsize)
        fig.suptitle(title, fontsize=16)
    # ...
        plt.show()

    def plot_raw_transformation(self, zoom = True, specificIndex = None, rows=8, cols=3, figsize=(15, 5), max_images = 24, title="Raw Transformation"):
        fig, axs = plt.subplots(rows, cols, figsize=figsize)
        fig.suptitle(title, fontsize=16)
    # ...
        print("[INFO] There are {} plotted images.".format(max_images))

    def plot_specific_raw_transformation(self, indexImage, zoom = True, rows=2, cols=3, figsize=(15, 5), title="Raw Transformation"):
        DataHandler.plot_raw_transformation(self, zoom, indexImage, rows, cols, figsize, -1, title)

    def compute_CFA_images(self, CFA_NAME):
    # ...
            print("[ERROR] There is no image in imagePaths")
            return

        # Check that the method is a valid key
        if method not in DataHandler.reconstruction_images.keys():
            print("[ERROR] The method {} is not valid.".format(method))
            exit(1)

        nameImage = Path(DataHandler.imagePaths[indexImage]).stem

        if DataHandler.CFA_images.get(nameImage) is None:
    # ...
        img = DataHandler.load_image(self, indexImage)
        img_CFA = DataHandler.CFA_images[nameImage].direct(img)

        cfa = options.get("cfa")
        if cfa is None:
            print("[ERROR] You must specify the cfa.")
            exit(1)

        if method == "interpolation":
            DataHandler.reconstruction_images[method].setdefault(nameImage, run_reconstruction(img_CFA, cfa))

        if method == "menon":
            DataHandler.reconstruction_images[method].setdefault(nameImage, run_reconstruction_brice_convers(img_CFA, cfa))

    def compute_reconstruction_images(self, method, options = None):
    # ...
        print("[INFO] There are {} images which have been reconstructed.".format(len(DataHandler.reconstruction_images[method])))

    def plot_reconstructed_image(self, indexImage, method, cfa = {"cfa": "bayer"}, zoomSize = "small", rows=1, cols=4, figsize=(15, 5)):
        # Check that the method is a valid key
        if method not in DataHandler.reconstruction_images.keys():
            print("[ERROR] The method {} is not valid.".format(method))
            exit(1)

        res = DataHandler.get_reconstructed_image(self, indexImage, method)

        fig, axs = plt.subplots(rows, cols, figsize=figsize)
        fig.suptitle("Reconstructed Image with method: {} and pattern type: {}".format(method, cfa["cfa"]), fontsize=16)

        axs[0].imshow(DataHandler.load_image(self, indexImage))
        axs[0].set_title('Original Image')
        axs[1].imshow(res)
        axs[1].set_title('Reconstructed Image')

        if zoomSize == "small":
            axs[2].imshow(DataHandler.load_image(self, indexImage)[800:864, 450:514])
            axs[2].set_title('Zoomed Input Image')
            axs[3].imshow(res[800:864, 450:514])
            axs[3].set_title('Zoomed Reconstructed Image')
        else:
            axs[2].imshow(DataHandler.load_image(self, indexImage)[2000:2064, 2000:2064])
            axs[2].set_title('Zoomed Input Image')
            axs[3].imshow(res[2000:2064, 2000:2064])
            axs[3].set_title('Zoomed Reconstructed Image')

        outputPath = os.path.join(DataHandler.WD_PATH, "output")
        folderExists(outputPath)

        imageName = Path(DataHandler.imagePaths[indexImage]).stem
        plotCompImagePath = os.path.join(outputPath, "Comparison_" + imageName + "_" + method + "_" + cfa["cfa"] + ".png")

        fig.savefig(plotCompImagePath)

    def get_reconstructed_image(self, indexImage, method):
        # Check that the method is a valid key
        if method not in DataHandler.reconstruction_images.keys():
            print("[ERROR] The method {} is not valid.".format(method))
            exit(1)

        DataHandler.indexImageExists(self, indexImage)
    # ...
```
The example script referenced in "How to use":

```Python
import src.methods.brice_convers.dataHandler as DataHandler
import src.methods.brice_convers.dataEvaluation as DataEvaluation
import time

WORKING_DIRECTORY_PATH = "SICOM_Image_Analysis/sicom_image_analysis_project/"

DataHandler = DataHandler.DataHandler(WORKING_DIRECTORY_PATH)
DataEvaluation = DataEvaluation.DataEvaluation(DataHandler)

def main(DataHandler):
    IMAGE_PATH = WORKING_DIRECTORY_PATH + "images/"
    CFA_NAME = "quad_bayer"
    METHOD = "menon"

    startTime = time.time()

    DataHandler.list_images(IMAGE_PATH)
    DataHandler.print_list_images()

    DataHandler.compute_CFA_images(CFA_NAME)
    DataHandler.compute_reconstruction_images(METHOD, {"cfa": CFA_NAME})

    DataHandler.plot_reconstructed_image(0, METHOD, {"cfa": CFA_NAME}, zoomSize="large")

    DataEvaluation.print_metrics(0, METHOD)

    endTime = time.time()
    print("[INFO] Elapsed time: " + str(endTime - startTime) + "s")
    print("[INFO] End")

if __name__ == "__main__":
    main(DataHandler)
```
src/methods/brice_convers/menon.py (adapted from the colour-demosaicing implementation of Menon 2007)

```Python
# ...
from colour.utilities import as_float_array, ones, tsplit, tstack
from scipy.ndimage.filters import convolve, convolve1d

from src.forward_model import CFA

def tensor_mask_to_RGB_mask(mask: ArrayLike, pixelPattern: str = "RGB"):
    # We extract the image channels from the mask
    for i, letter in enumerate(pixelPattern):
        if letter == "R":
            R_m = mask[:, :, i]
        elif letter == "G":
            G_m = mask[:, :, i]
        elif letter == "B":
            B_m = mask[:, :, i]
    return R_m, G_m, B_m
def _cnv_h(x: ArrayLike, y: ArrayLike) -> NDArrayFloat:
    """Perform horizontal convolution."""

# ... (docstring of demosaicing_CFA_Bayer_Menon2007)
    References
    ----------
    :cite:`Menon2007c`
    """

    # We extract the image channels from the mask
    R_m, G_m, B_m = tensor_mask_to_RGB_mask(mask, pixelPattern)

    # We extract the known pixel intensities: a zero in the mask means the
    # pixel intensity for that color is unknown
    R = rawImage * R_m
    G = rawImage * G_m
    B = rawImage * B_m
# ...
    h_0 = as_float_array([0.0, 0.5, 0.0, 0.5, 0.0])
    h_1 = as_float_array([-0.25, 0.0, 0.5, 0.0, -0.25])

    # Green component interpolation along both the horizontal and vertical
    # directions: for each unknown green pixel, we compute an estimate along
    # both directions
    G_H = np.where(G_m == 0, _cnv_h(rawImage, h_0) + _cnv_h(rawImage, h_1), G)
    G_V = np.where(G_m == 0, _cnv_v(rawImage, h_0) + _cnv_v(rawImage, h_1), G)

    # We calculate the chrominance differences along both directions: for each
    # known red and blue pixel, the difference between the pixel intensity and
    # the horizontal green component
    C_H = np.where(R_m == 1, R - G_H, 0)
    C_H = np.where(B_m == 1, B - G_H, C_H)

    # Same method with the vertical green component
    C_V = np.where(R_m == 1, R - G_V, 0)
    C_V = np.where(B_m == 1, B - G_V, C_V)

    # We compute the directional gradients along both directions. First we pad
    # the arrays to avoid boundary effects: two columns on the right of the
    # horizontal array and two rows at the bottom of the vertical array, in
    # reflect mode (the padding mirrors the last values of the array). Then we
    # drop the first two columns / rows, which shifts each array by two pixels.
    paded_D_H = np.pad(C_H, ((0, 0), (0, 2)), mode="reflect")[:, 2:]
    paded_D_V = np.pad(C_V, ((0, 2), (0, 0)), mode="reflect")[2:, :]

    # The difference between the original and shifted arrays compares each
    # pixel with its neighbor two pixels away, without boundary issues. It
    # gives a measure of the pixel intensity variation along each direction.
    D_H = np.abs(C_H - paded_D_H)
    D_V = np.abs(C_V - paded_D_V)

    del h_0, h_1, C_V, C_H, paded_D_V, paded_D_H
    # We define a sufficiently large neighborhood with a size of (5, 5)
    k = as_float_array(
# ...
        ]
    )
    # We convolve the difference components with the neighborhood. This
    # highlights the directional variations of the image in both directions.
    d_H = convolve(D_H, k, mode="constant")
    d_V = convolve(D_V, np.transpose(k), mode="constant")

    del D_H, D_V

    # We estimate the green channel with our classifier
    mask = d_V >= d_H
    G = np.where(mask, G_H, G_V)
    # M is the mask representing the best reconstruction direction
    M = np.where(mask, 1, 0)

    del d_H, d_V, G_H, G_V

    # Then, we estimate the red and blue channels.
    # Arrays with ones on every line containing at least one red (blue) pixel
    # in the red (blue) mask
    R_r = np.transpose(np.any(R_m == 1, axis=1)[None]) * ones(R.shape)
    B_r = np.transpose(np.any(B_m == 1, axis=1)[None]) * ones(B.shape)

    # We define a new filter
    k_b = as_float_array([0.5, 0, 0.5])

    # We fill the R array with the condition: on a line containing red pixels,
    # when we are on a green pixel of the green mask, we apply the filter
    # horizontally to the red channel. Otherwise we are on a red pixel (the
    # only other possibility on such a line), which is known, so we keep it.
    R = np.where(
        np.logical_and(G_m == 1, R_r == 1),
        G + _cnv_h(R, k_b) - _cnv_h(G, k_b),
        R,
    )
    # Same test, but on the lines containing blue pixels: there, the red
    # pixels are aligned vertically, so the filter is applied vertically.
    R = np.where(
        np.logical_and(G_m == 1, B_r == 1) == 1,
        G + _cnv_v(R, k_b) - _cnv_v(G, k_b),
        R,
    )
    # Same logic for the blue image
    B = np.where(
        np.logical_and(G_m == 1, B_r == 1),
        G + _cnv_h(B, k_b) - _cnv_h(G, k_b),
# ...
        B,
    )

    # To finish the R image, we need to interpolate the red values at blue
    # pixels. We use M to select the best direction and interpolate with the
    # filter.
    R_b = np.where(
        M == 1,
        B + _cnv_h(R, k_b) - _cnv_h(B, k_b),
        B + _cnv_v(R, k_b) - _cnv_v(B, k_b),
    )
    # Then the condition: on a line containing blue pixels, at a blue pixel,
    # we take the interpolated value above; otherwise the red value is known
    # and kept.
    R = np.where(
        np.logical_and(B_r == 1, B_m == 1),
        R_b,
        R,
    )

    # Same idea for the blue image
    B = np.where(
        np.logical_and(R_r == 1, R_m == 1),
        np.where(
# ...
def refining_step_Menon2007(
    # ... (signature and docstring partially elided)
    :class:`numpy.ndarray`
        Refined *RGB* colourspace array.
    """

    # Unpacking the RGB and RGB_m arrays
    R, G, B = tsplit(RGB)
    R_m, G_m, B_m = tsplit(RGB_m)
    M = as_float_array(M)
# ...
    R_G = R - G
    B_G = B - G

    # Definition of the low-pass filter
    FIR = ones(3) / 3

    # When we are on a blue pixel, we convolve it with the filter along the
    # best direction
    B_G_m = np.where(
        B_m == 1,
        np.where(M == 1, _cnv_h(B_G, FIR), _cnv_v(B_G, FIR)),
        0,
    )
    # Same for the red pixels
    R_G_m = np.where(
        R_m == 1,
        np.where(M == 1, _cnv_h(R_G, FIR), _cnv_v(R_G, FIR)),
# ...
    del B_G, R_G

    # We update the green component at known red and blue pixels with the
    # difference between the red or blue intensity and the filtered intensity
    G = np.where(R_m == 1, R - R_G_m, G)
    G = np.where(B_m == 1, B - B_G_m, G)

    # Updating of the red and blue components in the green locations.
    # R_r: ones on the lines containing at least one red pixel in the red mask
    R_r = np.transpose(np.any(R_m == 1, axis=1)[None]) * ones(R.shape)
    # R_c: ones on the columns containing at least one red pixel in the red mask
    R_c = np.any(R_m == 1, axis=0)[None] * ones(R.shape)
    # B_r: ones on the lines containing at least one blue pixel in the blue mask
    B_r = np.transpose(np.any(B_m == 1, axis=1)[None]) * ones(B.shape)
    # B_c: ones on the columns containing at least one blue pixel in the blue mask
    B_c = np.any(B_m == 1, axis=0)[None] * ones(B.shape)

    R_G = R - G
# ...
```
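To make the pad-and-shift trick used for `D_H` and `D_V` concrete, here is a tiny standalone check on toy values (not project data):

```Python
import numpy as np

# One row of chrominance differences
C_H = np.array([[1.0, 2.0, 4.0, 7.0]])

# Reflect-pad two columns on the right, then drop the first two columns:
# pad -> [[1, 2, 4, 7, 4, 2]], slice -> [[4, 7, 4, 2]]
shifted = np.pad(C_H, ((0, 0), (0, 2)), mode="reflect")[:, 2:]

# Each entry now compares a pixel with its neighbor two columns to the right,
# i.e. the next sample of the same color in a Bayer mosaic.
D_H = np.abs(C_H - shifted)
print(D_H)  # [[3. 5. 0. 5.]]
```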
src/methods/brice_convers/reconstruct.py

```Python
# ...
import numpy as np
import cv2

from src.forward_model import CFA
from src.methods.brice_convers.menon import demosaicing_CFA_Bayer_Menon2007
import src.methods.brice_convers.configuration as configuration
from src.methods.brice_convers.utilities import quad_bayer_to_bayer

def run_reconstruction(y: np.ndarray, cfa: str) -> np.ndarray:
# ...
    """
    input_shape = (y.shape[0], y.shape[1], 3)
    op = CFA("bayer", input_shape)

    if cfa == "quad_bayer":
        # Bin the Quad Bayer mosaic down to a half-resolution Bayer mosaic
        y = quad_bayer_to_bayer(y)
        op = CFA("bayer", y.shape)

    reconstructed_image = demosaicing_CFA_Bayer_Menon2007(y, op.mask, configuration.PIXEL_PATTERN, configuration.REFINING_STEP)

    if cfa == "quad_bayer":
        # Upscale back to the input resolution. Note: cv2.resize expects dsize
        # as (width, height) while input_shape[:2] is (rows, cols), which only
        # matches for square images (see the TODO about landscape pictures).
        return cv2.resize(reconstructed_image, input_shape[:2], interpolation=cv2.INTER_CUBIC)
    else:
        return reconstructed_image

####
####
# ...
```
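A minimal usage sketch on synthetic data (assuming the repository is on the Python path and `configuration.PIXEL_PATTERN` / `configuration.REFINING_STEP` are set as in the project):

```Python
import numpy as np
from src.forward_model import CFA
from src.methods.brice_convers.reconstruct import run_reconstruction

img = np.random.rand(64, 64, 3)    # stand-in for a real RGB image in [0, 1]
op = CFA("quad_bayer", img.shape)  # forward operator for the Quad Bayer pattern
y = op.direct(img)                 # simulated mosaic, shape (64, 64)
rgb = run_reconstruction(y, "quad_bayer")
print(rgb.shape)                   # (64, 64, 3)
```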
src/methods/brice_convers/utilities.py

```Python
import os
import cv2
import numpy as np

def folderExists(path):
    CHECK_FOLDER = os.path.isdir(path)
# ...
    if not CHECK_FOLDER:
        os.makedirs(path)
        print("[DATA] You created a new folder : " + str(path))

def quad_bayer_to_bayer(quad_bayer_pattern):
    # We check that the Quad Bayer mosaic has an even width and height; if not, we pad it.
    if quad_bayer_pattern.shape[0] % 2 != 0 or quad_bayer_pattern.shape[1] % 2 != 0:
        print("[INFO] The quad bayer pattern size is not valid. We need to pad it.")
        pad_schema = []
        if quad_bayer_pattern.shape[0] % 2 != 0:
            pad_schema.append([0, 1])
        else:
            pad_schema.append([0, 0])
        if quad_bayer_pattern.shape[1] % 2 != 0:
            pad_schema.append([0, 1])
        else:
            pad_schema.append([0, 0])
        # Reflect padding mirrors the last row/column so the 2x2 averaging below stays valid
        quad_bayer_pattern = np.pad(quad_bayer_pattern, pad_schema, mode="reflect")

    # We create a new Bayer mosaic with the proper (half) size
    bayer_pattern = np.zeros((quad_bayer_pattern.shape[0] // 2, quad_bayer_pattern.shape[1] // 2))

    # We combine adjacent pixels to create the Bayer mosaic: in a Quad Bayer
    # pattern each 2x2 block shares one filter color, so averaging each block
    # yields one Bayer sample.
    for i in range(0, quad_bayer_pattern.shape[0], 2):
        for j in range(0, quad_bayer_pattern.shape[1], 2):
            bayer_pattern[i // 2, j // 2] = (
                quad_bayer_pattern[i, j] +
                quad_bayer_pattern[i, j + 1] +
                quad_bayer_pattern[i + 1, j] +
                quad_bayer_pattern[i + 1, j + 1]
            ) / 4

    return bayer_pattern
```
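A quick sanity check of the 2x2 binning on a toy 4x4 mosaic (arbitrary values):

```Python
import numpy as np
# from src.methods.brice_convers.utilities import quad_bayer_to_bayer

# A 4x4 Quad Bayer mosaic: each 2x2 block shares one filter color
quad = np.array([
    [0.1, 0.2, 0.5, 0.6],
    [0.3, 0.4, 0.7, 0.8],
    [0.9, 1.0, 0.2, 0.3],
    [1.1, 1.2, 0.4, 0.5],
])
bayer = quad_bayer_to_bayer(quad)
print(bayer.shape)  # (2, 2): one averaged sample per 2x2 same-color block
print(bayer)        # [[0.25 0.65]
                    #  [1.05 0.35]]
```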