diff --git a/images/.DS_Store b/images/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..ec6b1b4665541d893fedc2b6446d96a9e17bd793
Binary files /dev/null and b/images/.DS_Store differ
diff --git a/images/img_5.jpg b/images/img_5.jpg
new file mode 100755
index 0000000000000000000000000000000000000000..63808bd80095c6199b8d923f63150f3a3463d599
Binary files /dev/null and b/images/img_5.jpg differ
diff --git a/images/img_6.jpg b/images/img_6.jpg
new file mode 100755
index 0000000000000000000000000000000000000000..c23c15e16067e45722f4c04bc245693e7dd7542b
Binary files /dev/null and b/images/img_6.jpg differ
diff --git a/src/.DS_Store b/src/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..2db64a934c0a822e722e06c687b67433760180a2
Binary files /dev/null and b/src/.DS_Store differ
diff --git a/src/methods/.DS_Store b/src/methods/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..f627673be4596f7376d005fb081b76950c670f41
Binary files /dev/null and b/src/methods/.DS_Store differ
diff --git a/src/methods/brice_convers/Image_Analysis_Project_Report_Brice_Convers.pdf b/src/methods/brice_convers/Image_Analysis_Project_Report_Brice_Convers.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..2f6d3ffbd7523461c07c0879733c26efcb9bdd74
Binary files /dev/null and b/src/methods/brice_convers/Image_Analysis_Project_Report_Brice_Convers.pdf differ
diff --git a/src/methods/brice_convers/README.md b/src/methods/brice_convers/README.md
index 029cc82af8d5b4d235cc63fc0860f764af0eb560..82ee2ae3434bf4364209e713b9f998b1041c9072 100644
--- a/src/methods/brice_convers/README.md
+++ b/src/methods/brice_convers/README.md
@@ -1,33 +1,76 @@
-- To reconstruct these images we provide you the forward operator, modeling the effect of a CFA camera with 2 different CFA patterns: either Bayer of Quad Bayer pattern. This operation is described in src/forward_operator.py
+# Image Analysis Project
 
-## Architecture
+Numerous commercial RGB cameras employ Color Filter Array (CFA) technology.
+This technology involves an array of red, green, or blue filters placed atop the sensor, usually
+organized in periodic patterns. The incident light is filtered accordingly before
+being captured by the sensor, so the acquisition process uses a predefined CFA
+pattern to allocate a single color to each pixel. The goal of this project is to demosaic such CFA images.
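+
+As a rough illustration (a minimal sketch, not part of this project's code; `apply_bayer_cfa` is a hypothetical helper), an RGGB Bayer CFA keeps a single color sample per pixel, and demosaicing is the inverse problem of recovering the three full color channels from this mosaic:
+
+```Python
+import numpy as np
+
+def apply_bayer_cfa(img: np.ndarray) -> np.ndarray:
+    """Simulate an RGGB Bayer CFA: keep one color sample per pixel."""
+    mosaic = np.zeros(img.shape[:2])
+    mosaic[0::2, 0::2] = img[0::2, 0::2, 0]  # red filters
+    mosaic[0::2, 1::2] = img[0::2, 1::2, 1]  # green filters
+    mosaic[1::2, 0::2] = img[1::2, 0::2, 1]  # green filters
+    mosaic[1::2, 1::2] = img[1::2, 1::2, 2]  # blue filters
+    return mosaic
+```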
 
-```bash
+You can check the report **Image_Analysis_Project_Report_Brice_Convers.pdf** for more information.
 
-sicom_image_analysis_project/
-├─.gitignore                   # Git's ignore file
-├─images/                      # All images you need to reconstruct
-├─main.ipynb                   # A notebook to experiment with the code
-├─output/                      # The output folder where you can save your reconstruction
-├─README.md                    # Readme, contains all information about the project
-├─readme_imgs/                 # Images for the Readme
-├─requirements.txt             # Requirement file for packages installation
-└─src/                         # All of the source files for the project
-  ├─checks.py                  # File containing some sanity checks
-  ├─forward_model.py           # File containing the CFA operator
-  ├─utils.py                   # Some utilities
-  └─methods/
-    ├─baseline/                # Example of reconstruction
-    └─template/                # Template of your project (to be copied)
+## How to use
 
+To use the project, move **main_template.py** outside the **src** folder and execute it.
+Alternatively, you can call the **run_reconstruction** method from your own program:
+
+```Python
+
+import src.methods.brice_convers.dataHandler as DataHandler
+import src.methods.brice_convers.dataEvaluation as DataEvaluation
+import time
+
+WORKING_DIRECTORY_PATH = "SICOM_Image_Analysis/sicom_image_analysis_project/"
+
+DataHandler = DataHandler.DataHandler(WORKING_DIRECTORY_PATH)
+DataEvaluation = DataEvaluation.DataEvaluation(DataHandler)
+
+def main(DataHandler):
+
+    IMAGE_PATH = WORKING_DIRECTORY_PATH + "images/"
+
+    CFA_NAME = "quad_bayer"
+
+    METHOD = "menon"
+
+    startTime = time.time()
+
+    DataHandler.list_images(IMAGE_PATH)
+
+    DataHandler.print_list_images()
+
+    DataHandler.compute_CFA_images(CFA_NAME)
+
+    DataHandler.compute_reconstruction_images(METHOD, {"cfa": CFA_NAME})
+
+    # The first argument (e.g. 3) is the image index in the list printed by "DataHandler.print_list_images()"
+    DataHandler.plot_reconstructed_image(3, METHOD, {"cfa": CFA_NAME}, zoomSize="large")
+
+    DataEvaluation.print_metrics(3, METHOD)
+
+    endTime = time.time()
+
+    print("[INFO] Elapsed time: " + str(endTime - startTime) + "s")
+
+    print("[INFO] End")
+
+if __name__ == "__main__":
+    main(DataHandler)
 
 ```
 
+## TODO List:
+
+- Fix the Menon method for the Quad Bayer pattern on landscape pictures
 
-## References:
 
-[1] [*Aerial Semantic Segmentation Drone Dataset:*](https://www.kaggle.com/datasets/bulentsiyah/semantic-drone-dataset) For the dataset.
 
+## References:
+
+[1] [*Research paper*](https://ieeexplore.ieee.org/document/4032820) used for the Menon method.
+
 ## Authors
 - [Brice Convers](https://briceconvers.com)
diff --git a/src/methods/brice_convers/dataEvaluation.py b/src/methods/brice_convers/dataEvaluation.py
index 7f2095d9f87fd5a59c16f86fa047472be9b96002..5648665120c65fb0a086d4ba68ddab70727d2df3 100644
--- a/src/methods/brice_convers/dataEvaluation.py
+++ b/src/methods/brice_convers/dataEvaluation.py
@@ -1,6 +1,6 @@
 from src.methods.brice_convers.dataHandler import DataHandler
 from src.utils import psnr, ssim
-from sklearn.metrics import f1_score
+from sklearn.metrics import f1_score, mean_squared_error
 import numpy as np
 
 class DataEvaluation:
@@ -15,20 +15,28 @@ class DataEvaluation:
 
         ssimMetric = ssim(img, res)
         psnrMetrc = psnr(img, res)
+        mse = mean_squared_error(img.flatten(), res.flatten())
+        mseRedPixels = mean_squared_error(img[:,:,0], res[:,:,0])
+        mseGreenPixels = mean_squared_error(img[:,:,1], res[:,:,1])
+        mseBluePixels = mean_squared_error(img[:,:,2], res[:,:,2])
         miMetric = DataEvaluation.MI(img, res)
         ccMetric = DataEvaluation.CC(img, res)
         sadMetric = DataEvaluation.SAD(img, res)
         lsMetric = DataEvaluation.LS(img, res)
 
         print("[INFO] Metrics for image {}".format(indexImage))
-        print("_" * 19)
-        print("| SSIM: {:.6}  |".format(ssimMetric))
-        print("| PSNR: {:.6}   |".format(psnrMetrc))
-        print("| MI: {:.6}     |".format(miMetric))
-        print("| CC: {:.6}    |".format(ccMetric))
-        print("| SAD: {:.6} |".format(sadMetric))
-        print("| LS: {:.6} |".format(lsMetric))
-        print("_" * 19)
+        print("#" * 30)
+        print(" SSIM: {:.6}  ".format(ssimMetric))
+        print(" PSNR: {:.6}   ".format(psnrMetrc))
+        print(" MSE : {:.3e}    ".format(mse))
+        print(" MSE (R): {:.3e}    ".format(mseRedPixels))
+        print(" MSE (G): {:.3e}    ".format(mseGreenPixels))
+        print(" MSE (B): {:.3e}    ".format(mseBluePixels))
+        print(" MI: {:.6}     ".format(miMetric))
+        print(" CC: {:.6}    ".format(ccMetric))
+        print(" SAD: {:.6} ".format(sadMetric))
+        print(" LS: {:.3e} ".format(lsMetric))
+        print("#" * 30)
 
     #Mutual Information
     def MI(img_mov, img_ref):
diff --git a/src/methods/brice_convers/dataHandler.py b/src/methods/brice_convers/dataHandler.py
index 95a586a7b4b2af3926698bb55de598681b9adfd4..959486644e006c65fa5b6228809dd79d0813b2b3 100644
--- a/src/methods/brice_convers/dataHandler.py
+++ b/src/methods/brice_convers/dataHandler.py
@@ -8,12 +8,14 @@ from src.methods.brice_convers.reconstruct import run_reconstruction as run_reco
 from src.methods.brice_convers.utilities import folderExists
 from src.utils import normalise_image, save_image, psnr, ssim
 from skimage.io import imread
+import numpy as np
 
 class DataHandler:
     def __init__(self, workingDirectoryPath = ""):
         DataHandler.imagePaths =[]
+        DataHandler.imagePathsLabels = []
         DataHandler.CFA_images = {}
-        DataHandler.reconstruction_images = {"interpolation:": {}, "menon": {}}
+        DataHandler.reconstruction_images = {"interpolation": {}, "menon": {}}
 
         DataHandler.IMAGE_TYPES = (".jpg", ".jpeg", ".png", ".bmp", ".tif", ".tiff")
         DataHandler.WD_PATH = workingDirectoryPath
@@ -45,7 +47,9 @@ class DataHandler:
                     if sum >= counter and (sum <= lastFile) or lastFile == -1:
                         # construct the path to the image and yield it
                         imagePath = os.path.join(rootDir, filename)
+                        imageName = Path(imagePath).stem
                         DataHandler.imagePaths.append(imagePath)
+                        DataHandler.imagePathsLabels.append(imageName)
 
                     sum += 1
 
@@ -54,7 +58,11 @@ class DataHandler:
         DataHandler.list_files(self, basePath, interval, validExts=DataHandler.IMAGE_TYPES, contains=contains)
         print("[INFO] There are {} images.".format(len(DataHandler.imagePaths)))
 
-    def plot_input_images(self, rows=3, cols=3, figsize=(20, 20), max_images = 6, title="Input Images"):
+    def print_list_images(self):
+        print("[INFO] This is the order or your image in the list. IndexImage is the index in this table:")
+        print(DataHandler.imagePathsLabels)
+
+    def plot_input_images(self, rows=3, cols=3, figsize=(15, 5), max_images = 6, title="Input Images"):
         fig = plt.figure(figsize=figsize)
         fig.suptitle(title, fontsize=16)
 
@@ -71,7 +79,7 @@ class DataHandler:
 
         plt.show()
 
-    def plot_raw_transformation(self, zoom = True, specificIndex = None, rows=8, cols=3, figsize=(20, 20), max_images = 24, title="Raw Transformation"):
+    def plot_raw_transformation(self, zoom = True, specificIndex = None, rows=8, cols=3, figsize=(15, 5), max_images = 24, title="Raw Transformation"):
         fig, axs = plt.subplots(rows, cols, figsize=figsize)
         fig.suptitle(title, fontsize=16)
 
@@ -118,7 +126,7 @@ class DataHandler:
 
         print("[INFO] There are {} ploted images.".format(max_images))
 
-    def plot_specific_raw_transformation(self, indexImage, zoom = True, rows=2, cols=3, figsize=(20, 10), title="Raw Transformation"):
+    def plot_specific_raw_transformation(self, indexImage, zoom = True, rows=2, cols=3, figsize=(15, 5), title="Raw Transformation"):
         DataHandler.plot_raw_transformation(self, zoom, indexImage, rows, cols, figsize, -1, title)
 
     def compute_CFA_images(self, CFA_NAME):
@@ -133,6 +141,11 @@ class DataHandler:
             print("[ERROR] There is no image in imagePaths")
             return
 
+        # Check that the method is a valid reconstruction key
+        if method not in DataHandler.reconstruction_images.keys():
+            print("[ERROR] The method {} is not valid.".format(method))
+            exit(1)
+
         nameImage = Path(DataHandler.imagePaths[indexImage]).stem
 
         if DataHandler.CFA_images.get(nameImage) is None:
@@ -141,26 +154,21 @@ class DataHandler:
 
         img = DataHandler.load_image(self, indexImage)
 
-        if method == "interpolation":
-            cfa = options.get("cfa")
+        img_CFA = DataHandler.CFA_images[nameImage].direct(img)
 
-            if cfa is None:
-                print("[ERROR] You must specify the cfa.")
-                exit(1)
+        cfa = options.get("cfa")
 
-            DataHandler.reconstruction_images[method].setdefault(nameImage, run_reconstruction(DataHandler.CFA_images[nameImage].direct(img), cfa))
+        if cfa is None:
+            print("[ERROR] You must specify the cfa.")
+            exit(1)
 
-        if method == "menon":
-            pattern = options.get("pattern")
-            refining_step = options.get("refining_step")
+        if method == "interpolation":
 
-            if (refining_step or cfa) is None:
-                print("[ERROR] You must specify the cfa and the refining step.")
-                exit(1)
+            DataHandler.reconstruction_images[method].setdefault(nameImage, run_reconstruction(img_CFA, cfa))
 
-            mask = DataHandler.CFA_images[nameImage].mask
+        if method == "menon":
 
-            DataHandler.reconstruction_images[method].setdefault(nameImage, run_reconstruction_brice_convers(DataHandler.CFA_images[nameImage].direct(img), mask, pattern, refining_step))
+            DataHandler.reconstruction_images[method].setdefault(nameImage, run_reconstruction_brice_convers(img_CFA, cfa))
 
     def compute_reconstruction_images(self, method, options = None):
 
@@ -169,27 +177,48 @@ class DataHandler:
 
         print("[INFO] There are {} images which have been reconstructed.".format(len(DataHandler.reconstruction_images[method])))
 
-    def plot_reconstructed_image(self, indexImage, method, rows=1, cols=2, figsize=(10, 10), title="Reconstructed Images"):
+    def plot_reconstructed_image(self, indexImage, method, cfa = {"cfa": "bayer"}, zoomSize = "small", rows=1, cols=4, figsize=(15, 5)):
+        # Check that the method is a valid reconstruction key
+        if method not in DataHandler.reconstruction_images.keys():
+            print("[ERROR] The method {} is not valid.".format(method))
+            exit(1)
+
         res = DataHandler.get_reconstructed_image(self, indexImage, method)
 
         fig, axs = plt.subplots(rows, cols, figsize=figsize)
+        fig.suptitle("Reconstructed Image with method: {} and pattern type: {}".format(method, cfa["cfa"]), fontsize=16)
+
         axs[0].imshow(DataHandler.load_image(self, indexImage))
-        axs[0].set_title('Original image')
+        axs[0].set_title('Original Image')
         axs[1].imshow(res)
-        axs[1].set_title('Reconstructed image')
+        axs[1].set_title('Reconstructed Image')
+
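+        # Hard-coded crop windows for the zoomed panels; the "large" option
+        # assumes images of at least 2064 x 2064 pixels.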
+        if zoomSize == "small":
+            axs[2].imshow(DataHandler.load_image(self, indexImage)[800:864, 450:514])
+            axs[2].set_title('Zoomed Input Image')
+            axs[3].imshow(res[800:864, 450:514])
+            axs[3].set_title('Zoomed Reconstructed Image')
+        else:
+            axs[2].imshow(DataHandler.load_image(self, indexImage)[2000:2064, 2000:2064])
+            axs[2].set_title('Zoomed Input Image')
+            axs[3].imshow(res[2000:2064, 2000:2064])
+            axs[3].set_title('Zoomed Reconstructed Image')
 
         outputPath = os.path.join(DataHandler.WD_PATH, "output")
         folderExists(outputPath)
 
         imageName = Path(DataHandler.imagePaths[indexImage]).stem
-        plotReconstructedImagePath = os.path.join(outputPath, imageName + "_" + method + ".png")
-        plotCompImagePath = os.path.join(outputPath, "Compararison_" + imageName + "_" + method + ".png")
+        plotCompImagePath = os.path.join(outputPath, "Comparison_" + imageName + "_" + method + "_" + cfa["cfa"] + ".png")
 
         #save_image( plotReconstructedImagePath, res)
         fig.savefig(plotCompImagePath)
 
 
     def get_reconstructed_image(self, indexImage, method):
+        # Check that the method is a valid reconstruction key
+        if method not in DataHandler.reconstruction_images.keys():
+            print("[ERROR] The method {} is not valid.".format(method))
+            exit(1)
 
         DataHandler.indexImageExists(self, indexImage)
 
diff --git a/src/methods/brice_convers/main_template.py b/src/methods/brice_convers/main_template.py
new file mode 100644
index 0000000000000000000000000000000000000000..67cad897733368cf8a89dd8a8d667bc1ef5dbe77
--- /dev/null
+++ b/src/methods/brice_convers/main_template.py
@@ -0,0 +1,39 @@
+import src.methods.brice_convers.dataHandler as DataHandler
+import src.methods.brice_convers.dataEvaluation as DataEvaluation
+import time
+
+WORKING_DIRECTORY_PATH = "SICOM_Image_Analysis/sicom_image_analysis_project/"
+
+DataHandler = DataHandler.DataHandler(WORKING_DIRECTORY_PATH)
+DataEvaluation = DataEvaluation.DataEvaluation(DataHandler)
+
+def main(DataHandler):
+
+    IMAGE_PATH = WORKING_DIRECTORY_PATH + "images/"
+
+    CFA_NAME = "quad_bayer"
+
+    METHOD = "menon"
+
+    startTime = time.time()
+
+    DataHandler.list_images(IMAGE_PATH)
+
+    DataHandler.print_list_images()
+
+    DataHandler.compute_CFA_images(CFA_NAME)
+
+    DataHandler.compute_reconstruction_images(METHOD, {"cfa": CFA_NAME})
+
+    DataHandler.plot_reconstructed_image(0, METHOD, {"cfa": CFA_NAME}, zoomSize="large")
+
+    DataEvaluation.print_metrics(0, METHOD)
+
+    endTime = time.time()
+
+    print("[INFO] Elapsed time: " + str(endTime - startTime) + "s")
+
+    print("[INFO] End")
+
+if __name__ == "__main__":
+    main(DataHandler)
diff --git a/src/methods/brice_convers/menon2007.py b/src/methods/brice_convers/menon.py
similarity index 66%
rename from src/methods/brice_convers/menon2007.py
rename to src/methods/brice_convers/menon.py
index 78a3abe1a344d5bc6f358a43eda388e7bac9c895..935f80508443fc72b99159dc4c893ebca5e63124 100644
--- a/src/methods/brice_convers/menon2007.py
+++ b/src/methods/brice_convers/menon.py
@@ -18,13 +18,17 @@ from colour.utilities import as_float_array, ones, tsplit, tstack
 from scipy.ndimage.filters import convolve, convolve1d
 from src.forward_model import CFA
 
-__author__ = "Colour Developers"
-__copyright__ = "Copyright 2015 Colour Developers"
-__license__ = "BSD-3-Clause - https://opensource.org/licenses/BSD-3-Clause"
-__maintainer__ = "Colour Developers"
-__email__ = "colour-developers@colour-science.org"
-__status__ = "Production"
+def tensor_mask_to_RGB_mask(mask: ArrayLike, pixelPattern: str = "RGB"):
+    # We extract the image channels from the mask
+    for i, letter in enumerate(pixelPattern):
+        if letter == "R":
+            R_m = mask[:, :, i]
+        elif letter == "G":
+            G_m = mask[:, :, i]
+        elif letter == "B":
+            B_m = mask[:, :, i]
 
+    return R_m, G_m, B_m
 
 def _cnv_h(x: ArrayLike, y: ArrayLike) -> NDArrayFloat:
     """Perform horizontal convolution."""
@@ -74,53 +78,12 @@ examples_merge_from_raw_files_with_post_demosaicing.ipynb>`__.
     References
     ----------
     :cite:`Menon2007c`
-
-    Examples
-    --------
-    >>> CFA = np.array(
-    ...     [
-    ...         [0.30980393, 0.36078432, 0.30588236, 0.3764706],
-    ...         [0.35686275, 0.39607844, 0.36078432, 0.40000001],
-    ...     ]
-    ... )
-    >>> demosaicing_CFA_Bayer_Menon2007(CFA)
-    array([[[ 0.30980393,  0.35686275,  0.39215687],
-            [ 0.30980393,  0.36078432,  0.39607844],
-            [ 0.30588236,  0.36078432,  0.39019608],
-            [ 0.32156864,  0.3764706 ,  0.40000001]],
-    <BLANKLINE>
-           [[ 0.30980393,  0.35686275,  0.39215687],
-            [ 0.30980393,  0.36078432,  0.39607844],
-            [ 0.30588236,  0.36078432,  0.39019609],
-            [ 0.32156864,  0.3764706 ,  0.40000001]]])
-    >>> CFA = np.array(
-    ...     [
-    ...         [0.3764706, 0.36078432, 0.40784314, 0.3764706],
-    ...         [0.35686275, 0.30980393, 0.36078432, 0.29803923],
-    ...     ]
-    ... )
-    >>> demosaicing_CFA_Bayer_Menon2007(CFA, "BGGR")
-    array([[[ 0.30588236,  0.35686275,  0.3764706 ],
-            [ 0.30980393,  0.36078432,  0.39411766],
-            [ 0.29607844,  0.36078432,  0.40784314],
-            [ 0.29803923,  0.3764706 ,  0.42352942]],
-    <BLANKLINE>
-           [[ 0.30588236,  0.35686275,  0.3764706 ],
-            [ 0.30980393,  0.36078432,  0.39411766],
-            [ 0.29607844,  0.36078432,  0.40784314],
-            [ 0.29803923,  0.3764706 ,  0.42352942]]])
     """
 
     # We extract image chanels from mask
-    for i, letter in enumerate(pixelPattern):
-        if letter == "R":
-            R_m = mask[:, :, i]
-        elif letter == "G":
-            G_m = mask[:, :, i]
-        elif letter == "B":
-            B_m = mask[:, :, i]
+    R_m, G_m, B_m = tensor_mask_to_RGB_mask(mask, pixelPattern)
 
-    # We extract known pixel intensities
+    # We extract the known pixel intensities: a zero in the mask means the intensity of that color is unknown at that pixel
     R = rawImage * R_m
     G = rawImage * G_m
     B = rawImage * B_m
@@ -129,22 +92,34 @@ examples_merge_from_raw_files_with_post_demosaicing.ipynb>`__.
     h_0 = as_float_array([0.0, 0.5, 0.0, 0.5, 0.0])
     h_1 = as_float_array([-0.25, 0.0, 0.5, 0.0, -0.25])
 
-    # Green components interpolation along both horizontal and veritcal directions
+    # Green components interpolation along both horizontal and vertical directions:
+    # for each unknown green pixel, we compute a directional estimate along each of the two directions
     G_H = np.where(G_m == 0, _cnv_h(rawImage, h_0) + _cnv_h(rawImage, h_1), G)
     G_V = np.where(G_m == 0, _cnv_v(rawImage, h_0) + _cnv_v(rawImage, h_1), G)
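+
+    # E.g. at a red pixel, _cnv_h(rawImage, h_0) averages the two horizontal
+    # neighbors (greens), while _cnv_h(rawImage, h_1) adds a second-order
+    # correction computed from the red sample itself and those two pixels away.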
 
     # We calculate the chrominance differences along both horizontal and vertical directions
+    # At each known red or blue pixel, we compute the difference between its intensity and the estimated horizontal green component
     C_H = np.where(R_m == 1, R - G_H, 0)
     C_H = np.where(B_m == 1, B - G_H, C_H)
 
+    # Same method with the vertical green component
     C_V = np.where(R_m == 1, R - G_V, 0)
     C_V = np.where(B_m == 1, B - G_V, C_V)
 
     # We compute the directional gradients along both horizontal and vertical directions
-    D_H = np.abs(C_H - np.pad(C_H, ((0, 0), (0, 2)), mode="reflect")[:, 2:])
-    D_V = np.abs(C_V - np.pad(C_V, ((0, 2), (0, 0)), mode="reflect")[2:, :])
+    # First we pad the arrays to avoid boundary effects, using the reflect mode
+    # (values are mirrored at the border rather than padded with zeros).
+    # We add two columns to the right of the horizontal array and two rows at the bottom of the vertical array,
+    # then remove the first two columns of the horizontal array and the first two rows of the vertical array.
+    padded_C_H = np.pad(C_H, ((0, 0), (0, 2)), mode="reflect")[:, 2:]
+    padded_C_V = np.pad(C_V, ((0, 2), (0, 0)), mode="reflect")[2:, :]
 
-    del h_0, h_1, C_V, C_H
+    # We compute the difference between the original array and the shifted one.
+    # Each pixel is compared with its neighbor two pixels to the right (or below), with no boundary issues.
+    # This gives a measure of pixel intensity variation along the horizontal and vertical directions.
+    D_H = np.abs(C_H - padded_C_H)
+    D_V = np.abs(C_V - padded_C_V)
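+
+    # Illustration with a tiny row: for C_H = [[1, 2, 3]],
+    # np.pad(C_H, ((0, 0), (0, 2)), mode="reflect")[:, 2:] gives [[3, 2, 1]],
+    # so D_H = |[[1, 2, 3]] - [[3, 2, 1]]| = [[2, 0, 2]].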
+
+    del h_0, h_1, C_V, C_H, padded_C_V, padded_C_H
 
     # We define a sufficiently large neighborhood with a size of (5, 5).
     k = as_float_array(
@@ -157,39 +132,46 @@ examples_merge_from_raw_files_with_post_demosaicing.ipynb>`__.
         ]
     )
 
+    # We convolve the difference maps with the neighborhood kernel to highlight directional variations of the image in the two directions.
     d_H = convolve(D_H, k, mode="constant")
     d_V = convolve(D_V, np.transpose(k), mode="constant")
 
     del D_H, D_V
 
-    # We estimate the green channel with our classifiers
+    # We estimate the green channel with our classifier
     mask = d_V >= d_H
     G = np.where(mask, G_H, G_V)
+    # M records which direction gave the best reconstruction (1 for horizontal, 0 for vertical)
     M = np.where(mask, 1, 0)
 
     del d_H, d_V, G_H, G_V
 
-    # The, we estimate the red and blue channels
+    # Then, we estimate the red and blue channels
 
-    # Red rows.
+    # Arrays with ones on the rows that contain at least one red (blue) pixel in the red (blue) mask
     R_r = np.transpose(np.any(R_m == 1, axis=1)[None]) * ones(R.shape)
-    # Blue rows.
     B_r = np.transpose(np.any(B_m == 1, axis=1)[None]) * ones(B.shape)
 
+    # We define a simple two-neighbor averaging filter
     k_b = as_float_array([0.5, 0, 0.5])
 
+    # We fill the R array with this condition: on rows that contain red pixels, at the green pixel locations, we interpolate the red value horizontally.
+    # Otherwise we are on a known red pixel (the only other possibility on such a row), so we keep its value.
     R = np.where(
         np.logical_and(G_m == 1, R_r == 1),
         G + _cnv_h(R, k_b) - _cnv_h(G, k_b),
         R,
     )
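+
+    # E.g. at such a green pixel g with horizontal red neighbors r_l and r_r
+    # (and estimated greens g_l, g_r at those red locations):
+    #     R = g + (r_l + r_r) / 2 - (g_l + g_r) / 2,
+    # i.e. the red/green color difference is assumed locally constant.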
 
+    # Same test, but for the rows that contain at least one blue pixel in the blue mask.
+    # When the condition is true, we apply the filter vertically, because there the red pixels are aligned vertically.
     R = np.where(
         np.logical_and(G_m == 1, B_r == 1) == 1,
         G + _cnv_v(R, k_b) - _cnv_v(G, k_b),
         R,
     )
 
+    # It is the same logic for the blue image
     B = np.where(
         np.logical_and(G_m == 1, B_r == 1),
         G + _cnv_h(B, k_b) - _cnv_h(G, k_b),
@@ -202,17 +184,22 @@ examples_merge_from_raw_files_with_post_demosaicing.ipynb>`__.
         B,
     )
 
-    # We use again our classifiers with matrix M to estimate the red and blue channels according the most likely direction
-    R = np.where(
-        np.logical_and(B_r == 1, B_m == 1),
-        np.where(
+    # To finish the R channel we need to estimate red at the blue pixel locations. We use M to pick the best direction and then interpolate with the filter.
+    R_b = np.where(
             M == 1,
             B + _cnv_h(R, k_b) - _cnv_h(B, k_b),
             B + _cnv_v(R, k_b) - _cnv_v(B, k_b),
-        ),
+        )
+
+    # Then we apply the condition: on a blue pixel (on a row that contains blue pixels) we take the interpolated value above.
+    # Otherwise the red value is known and we keep it.
+    R = np.where(
+        np.logical_and(B_r == 1, B_m == 1),
+        R_b,
         R,
     )
 
+    # Same idea for the blue image.
     B = np.where(
         np.logical_and(R_r == 1, R_m == 1),
         np.where(
@@ -257,7 +244,7 @@ def refining_step_Menon2007(
     :class:`numpy.ndarray`
         Refined *RGB* colourspace array.
     """
-
+    # Unpacking the RGB and RGB_m arrays.
     R, G, B = tsplit(RGB)
     R_m, G_m, B_m = tsplit(RGB_m)
     M = as_float_array(M)
@@ -268,13 +255,17 @@ def refining_step_Menon2007(
     R_G = R - G
     B_G = B - G
 
+    # Definition of the low-pass filter.
     FIR = ones(3) / 3
 
+    # At blue pixels, we low-pass filter the B - G difference along the best direction given by M.
     B_G_m = np.where(
         B_m == 1,
         np.where(M == 1, _cnv_h(B_G, FIR), _cnv_v(B_G, FIR)),
         0,
     )
+
+    # Same for the red pixels.
     R_G_m = np.where(
         R_m == 1,
         np.where(M == 1, _cnv_h(R_G, FIR), _cnv_v(R_G, FIR)),
@@ -283,17 +274,19 @@ def refining_step_Menon2007(
 
     del B_G, R_G
 
+    # We update the green component at known red and blue pixels, as the pixel intensity minus the filtered color difference.
     G = np.where(R_m == 1, R - R_G_m, G)
     G = np.where(B_m == 1, B - B_G_m, G)
 
     # Updating of the red and blue components in the green locations.
-    # Red rows.
+
+    # R_r is an array with ones on the rows where there is at least one red pixel in the red mask.
     R_r = np.transpose(np.any(R_m == 1, axis=1)[None]) * ones(R.shape)
-    # Red columns.
+    # R_c is an array with ones on the columns where there is at least one red pixel in the red mask.
     R_c = np.any(R_m == 1, axis=0)[None] * ones(R.shape)
-    # Blue rows.
+    # B_r is an array with ones on the rows where there is at least one blue pixel in the blue mask.
     B_r = np.transpose(np.any(B_m == 1, axis=1)[None]) * ones(B.shape)
-    # Blue columns.
+    # B_c is an array with ones on the columns where there is at least one blue pixel in the blue mask.
     B_c = np.any(B_m == 1, axis=0)[None] * ones(B.shape)
 
     R_G = R - G
diff --git a/src/methods/brice_convers/reconstruct.py b/src/methods/brice_convers/reconstruct.py
index c9319ae4197759ca0f6e81b8a2598338500b0c9e..7e207e2ac52c51c944c3159f8444818fcb1c1492 100755
--- a/src/methods/brice_convers/reconstruct.py
+++ b/src/methods/brice_convers/reconstruct.py
@@ -5,10 +5,11 @@ Students can call their functions (declared in others files of src/methods/your_
 
 
 import numpy as np
-
+import cv2
 from src.forward_model import CFA
-from src.methods.brice_convers.menon2007 import demosaicing_CFA_Bayer_Menon2007
+from src.methods.brice_convers.menon import demosaicing_CFA_Bayer_Menon2007
 import src.methods.brice_convers.configuration as configuration
+from src.methods.brice_convers.utilities import quad_bayer_to_bayer
 
 
 def run_reconstruction(y: np.ndarray, cfa: str) -> np.ndarray:
@@ -23,9 +24,18 @@ def run_reconstruction(y: np.ndarray, cfa: str) -> np.ndarray:
     """
 
     input_shape = (y.shape[0], y.shape[1], 3)
-    mask = CFA(cfa, input_shape)
+    op = CFA("bayer", input_shape)
+
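+    # For a Quad Bayer input we first bin the mosaic down to a regular Bayer
+    # mosaic (half size), demosaic it, then upscale back to the input size.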
+    if cfa == "quad_bayer":
+        y = quad_bayer_to_bayer(y)
+        op = CFA("bayer", y.shape)
+
+    reconstructed_image = demosaicing_CFA_Bayer_Menon2007(y, op.mask, configuration.PIXEL_PATTERN, configuration.REFINING_STEP)
 
-    return demosaicing_CFA_Bayer_Menon2007(y, mask, configuration.PIXEL_PATTERN, configuration.REFINING_STEP)
+    if cfa == "quad_bayer":
+        # cv2.resize expects the target size as (width, height)
+        return cv2.resize(reconstructed_image, (input_shape[1], input_shape[0]), interpolation=cv2.INTER_CUBIC)
+    else:
+        return reconstructed_image
 
 ####
 ####
diff --git a/src/methods/brice_convers/utilities.py b/src/methods/brice_convers/utilities.py
index 5735b2164c7f19e7ad8a1368fbd1b6e1182f461b..2e86af5f4b28bb6dec7ff2a8745106bbece341a5 100644
--- a/src/methods/brice_convers/utilities.py
+++ b/src/methods/brice_convers/utilities.py
@@ -1,4 +1,5 @@
 import os
+import numpy as np
 
 def folderExists(path):
 	CHECK_FOLDER = os.path.isdir(path)
@@ -7,3 +8,40 @@ def folderExists(path):
 	if not CHECK_FOLDER:
 		os.makedirs(path)
 		print("[DATA] You created a new folder : " +  str(path))
+
+
+def quad_bayer_to_bayer(quad_bayer_pattern):
+	# We check that the Quad Bayer dimensions (width and height) are multiples of 2. If not, we pad the image.
+	if quad_bayer_pattern.shape[0] % 2 != 0 or quad_bayer_pattern.shape[1] % 2 != 0:
+		print("[INFO] The Quad Bayer image has an odd dimension. We pad it.")
+
+		pad_schema = []
+
+		if quad_bayer_pattern.shape[0] % 2 != 0:
+			pad_schema.append([0, 1])
+		else:
+			pad_schema.append([0, 0])
+
+		if quad_bayer_pattern.shape[1] % 2 != 0:
+			pad_schema.append([0, 1])
+		else:
+			pad_schema.append([0, 0])
+
+		# Reflect-pad by one row/column where needed so both dimensions are even
+		quad_bayer_pattern = np.pad(quad_bayer_pattern, pad_schema, mode="reflect")
+
+	# We create the Bayer image at half the Quad Bayer size
+	bayer_pattern = np.zeros((quad_bayer_pattern.shape[0] // 2, quad_bayer_pattern.shape[1] // 2))
+
+	# We average each 2x2 block (same color filter in a Quad Bayer) to create one Bayer pixel
+	for i in range(0, quad_bayer_pattern.shape[0], 2):
+		for j in range(0, quad_bayer_pattern.shape[1], 2):
+			bayer_pattern[i // 2, j // 2] = (
+				quad_bayer_pattern[i, j] +
+				quad_bayer_pattern[i, j + 1] +
+				quad_bayer_pattern[i + 1, j] +
+				quad_bayer_pattern[i + 1, j + 1]
+			) / 4
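+
+	# Illustration: in a 4x4 Quad Bayer mosaic, the top-left 2x2 block holds four
+	# red samples; their average becomes the single R pixel of the resulting
+	# 2x2 Bayer mosaic (and similarly for the G and B blocks).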
+
+	# The Bayer image is half the original size; the upscale back to the original
+	# resolution is done in reconstruct.py after demosaicing
+	return bayer_pattern