Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • samanost/sicom_image_analysis_project
  • gerayelk/sicom_image_analysis_project
  • jelassiy/sicom_image_analysis_project
  • chardoto/sicom_image_analysis_project
  • chaarim/sicom_image_analysis_project
  • domers/sicom_image_analysis_project
  • elmurrt/sicom_image_analysis_project
  • sadonest/sicom_image_analysis_project
  • kouddann/sicom_image_analysis_project
  • mirabitj/sicom-image-analysis-project-mirabito
  • plotj/sicom_image_analysis_project
  • torrem/sicom-image-analysis-project-maxime-torre
  • dzike/sicom_image_analysis_project
  • daip/sicom_image_analysis_project
  • casanovv/sicom_image_analysis_project
  • girmarti/sicom_image_analysis_project
  • lioretn/sicom_image_analysis_project
  • lemoinje/sicom_image_analysis_project
  • ouahmanf/sicom_image_analysis_project
  • vouilloa/sicom_image_analysis_project
  • diopb/sicom_image_analysis_project
  • davidale/sicom_image_analysis_project
  • enza/sicom_image_analysis_project
  • conversb/sicom_image_analysis_project
  • mullemat/sicom_image_analysis_project
25 results
Show changes
Commits on Source (163)
Showing
with 1647 additions and 585 deletions
[
{
"label": "numpy",
"kind": 6,
"isExtraImport": true,
"importPath": "numpy",
"description": "numpy",
"detail": "numpy",
"documentation": {}
},
{
"label": "convolve2d",
"importPath": "scipy.signal",
"description": "scipy.signal",
"isExtraImport": true,
"detail": "scipy.signal",
"documentation": {}
},
{
"label": "CFA",
"importPath": "src.forward_model",
"description": "src.forward_model",
"isExtraImport": true,
"detail": "src.forward_model",
"documentation": {}
},
{
"label": "CFA",
"importPath": "src.forward_model",
"description": "src.forward_model",
"isExtraImport": true,
"detail": "src.forward_model",
"documentation": {}
},
{
"label": "CFA",
"importPath": "src.forward_model",
"description": "src.forward_model",
"isExtraImport": true,
"detail": "src.forward_model",
"documentation": {}
},
{
"label": "naive_interpolation",
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"isExtraImport": true,
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "*",
"importPath": "src.methods.ELAMRANI_Mouna.functions",
"description": "src.methods.ELAMRANI_Mouna.functions",
"isExtraImport": true,
"detail": "src.methods.ELAMRANI_Mouna.functions",
"documentation": {}
},
{
"label": "exists",
"importPath": "os.path",
"description": "os.path",
"isExtraImport": true,
"detail": "os.path",
"documentation": {}
},
{
"label": "check_cfa",
"importPath": "src.checks",
"description": "src.checks",
"isExtraImport": true,
"detail": "src.checks",
"documentation": {}
},
{
"label": "check_rgb",
"importPath": "src.checks",
"description": "src.checks",
"isExtraImport": true,
"detail": "src.checks",
"documentation": {}
},
{
"label": "check_data_range",
"importPath": "src.checks",
"description": "src.checks",
"isExtraImport": true,
"detail": "src.checks",
"documentation": {}
},
{
"label": "check_rgb",
"importPath": "src.checks",
"description": "src.checks",
"isExtraImport": true,
"detail": "src.checks",
"documentation": {}
},
{
"label": "check_shape",
"importPath": "src.checks",
"description": "src.checks",
"isExtraImport": true,
"detail": "src.checks",
"documentation": {}
},
{
"label": "check_path",
"importPath": "src.checks",
"description": "src.checks",
"isExtraImport": true,
"detail": "src.checks",
"documentation": {}
},
{
"label": "check_png",
"importPath": "src.checks",
"description": "src.checks",
"isExtraImport": true,
"detail": "src.checks",
"documentation": {}
},
{
"label": "peak_signal_noise_ratio",
"importPath": "skimage.metrics",
"description": "skimage.metrics",
"isExtraImport": true,
"detail": "skimage.metrics",
"documentation": {}
},
{
"label": "structural_similarity",
"importPath": "skimage.metrics",
"description": "skimage.metrics",
"isExtraImport": true,
"detail": "skimage.metrics",
"documentation": {}
},
{
"label": "imread",
"importPath": "skimage.io",
"description": "skimage.io",
"isExtraImport": true,
"detail": "skimage.io",
"documentation": {}
},
{
"label": "imsave",
"importPath": "skimage.io",
"description": "skimage.io",
"isExtraImport": true,
"detail": "skimage.io",
"documentation": {}
},
{
"label": "naive_interpolation",
"kind": 2,
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"peekOfCode": "def naive_interpolation(op: CFA, y: np.ndarray) -> np.ndarray:\n \"\"\"Performs a simple interpolation of the lost pixels.\n Args:\n op (CFA): CFA operator.\n y (np.ndarray): Mosaicked image.\n Returns:\n np.ndarray: Demosaicked image.\n \"\"\"\n z = op.adjoint(y)\n if op.cfa == 'bayer':",
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "extract_padded",
"kind": 2,
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"peekOfCode": "def extract_padded(M, size, i, j):\n N_i, N_j = M.shape\n res = np.zeros((size, size))\n middle_size = int((size - 1) / 2)\n for ii in range(- middle_size, middle_size + 1):\n for jj in range(- middle_size, middle_size + 1):\n if i + ii >= 0 and i + ii < N_i and j + jj >= 0 and j + jj < N_j:\n res[middle_size + ii, middle_size + jj] = M[i + ii, j + jj]\n return res\ndef varying_kernel_convolution(M, K_list):",
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "varying_kernel_convolution",
"kind": 2,
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"peekOfCode": "def varying_kernel_convolution(M, K_list):\n N_i, N_j = M.shape\n res = np.zeros_like(M)\n for i in range(N_i):\n for j in range(N_j):\n res[i, j] = np.sum(extract_padded(M, K_list[4 * (i % 4) + j % 4].shape[0], i, j) * K_list[4 * (i % 4) + j % 4])\n np.clip(res, 0, 1, res)\n return res\nK_identity = np.zeros((5, 5))\nK_identity[2, 2] = 1",
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "K_identity",
"kind": 5,
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"peekOfCode": "K_identity = np.zeros((5, 5))\nK_identity[2, 2] = 1\nK_red_0 = np.zeros((5, 5))\nK_red_0[2, :] = np.array([-3, 13, 0, 0, 2]) / 12\nK_red_1 = np.zeros((5, 5))\nK_red_1[2, :] = np.array([2, 0, 0, 13, -3]) / 12\nK_red_8 = np.zeros((5, 5))\nK_red_8[:2, :2] = np.array([[-1, -1], [-1, 9]]) / 6\nK_red_9 = np.zeros((5, 5))\nK_red_9[:2, 3:] = np.array([[-1, -1], [9, -1]]) / 6",
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "K_red_0",
"kind": 5,
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"peekOfCode": "K_red_0 = np.zeros((5, 5))\nK_red_0[2, :] = np.array([-3, 13, 0, 0, 2]) / 12\nK_red_1 = np.zeros((5, 5))\nK_red_1[2, :] = np.array([2, 0, 0, 13, -3]) / 12\nK_red_8 = np.zeros((5, 5))\nK_red_8[:2, :2] = np.array([[-1, -1], [-1, 9]]) / 6\nK_red_9 = np.zeros((5, 5))\nK_red_9[:2, 3:] = np.array([[-1, -1], [9, -1]]) / 6\nK_red_10 = np.zeros((5, 5))\nK_red_10[:, 2] = np.array([-3, 13, 0, 0, 2]) / 12",
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "K_red_1",
"kind": 5,
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"peekOfCode": "K_red_1 = np.zeros((5, 5))\nK_red_1[2, :] = np.array([2, 0, 0, 13, -3]) / 12\nK_red_8 = np.zeros((5, 5))\nK_red_8[:2, :2] = np.array([[-1, -1], [-1, 9]]) / 6\nK_red_9 = np.zeros((5, 5))\nK_red_9[:2, 3:] = np.array([[-1, -1], [9, -1]]) / 6\nK_red_10 = np.zeros((5, 5))\nK_red_10[:, 2] = np.array([-3, 13, 0, 0, 2]) / 12\nK_red_12 = np.zeros((5, 5))\nK_red_12[3:, :2] = np.array([[-1, 9], [-1, -1]]) / 6",
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "K_red_8",
"kind": 5,
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"peekOfCode": "K_red_8 = np.zeros((5, 5))\nK_red_8[:2, :2] = np.array([[-1, -1], [-1, 9]]) / 6\nK_red_9 = np.zeros((5, 5))\nK_red_9[:2, 3:] = np.array([[-1, -1], [9, -1]]) / 6\nK_red_10 = np.zeros((5, 5))\nK_red_10[:, 2] = np.array([-3, 13, 0, 0, 2]) / 12\nK_red_12 = np.zeros((5, 5))\nK_red_12[3:, :2] = np.array([[-1, 9], [-1, -1]]) / 6\nK_red_13 = np.zeros((5, 5))\nK_red_13[3:, 3:] = np.array([[9, -1], [-1, -1]]) / 6",
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "K_red_9",
"kind": 5,
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"peekOfCode": "K_red_9 = np.zeros((5, 5))\nK_red_9[:2, 3:] = np.array([[-1, -1], [9, -1]]) / 6\nK_red_10 = np.zeros((5, 5))\nK_red_10[:, 2] = np.array([-3, 13, 0, 0, 2]) / 12\nK_red_12 = np.zeros((5, 5))\nK_red_12[3:, :2] = np.array([[-1, 9], [-1, -1]]) / 6\nK_red_13 = np.zeros((5, 5))\nK_red_13[3:, 3:] = np.array([[9, -1], [-1, -1]]) / 6\nK_red_14 = np.zeros((5, 5))\nK_red_14[:, 2] = np.array([2, 0, 0, 13, -3]) / 12",
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "K_red_10",
"kind": 5,
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"peekOfCode": "K_red_10 = np.zeros((5, 5))\nK_red_10[:, 2] = np.array([-3, 13, 0, 0, 2]) / 12\nK_red_12 = np.zeros((5, 5))\nK_red_12[3:, :2] = np.array([[-1, 9], [-1, -1]]) / 6\nK_red_13 = np.zeros((5, 5))\nK_red_13[3:, 3:] = np.array([[9, -1], [-1, -1]]) / 6\nK_red_14 = np.zeros((5, 5))\nK_red_14[:, 2] = np.array([2, 0, 0, 13, -3]) / 12\nK_list_red = [K_red_0, K_red_1, K_identity, K_identity, K_red_0, K_red_1, K_identity, K_identity, K_red_8, K_red_9, K_red_10, K_red_10, K_red_12, K_red_13, K_red_14, K_red_14]\nK_green_2 = np.zeros((5, 5))",
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "K_red_12",
"kind": 5,
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"peekOfCode": "K_red_12 = np.zeros((5, 5))\nK_red_12[3:, :2] = np.array([[-1, 9], [-1, -1]]) / 6\nK_red_13 = np.zeros((5, 5))\nK_red_13[3:, 3:] = np.array([[9, -1], [-1, -1]]) / 6\nK_red_14 = np.zeros((5, 5))\nK_red_14[:, 2] = np.array([2, 0, 0, 13, -3]) / 12\nK_list_red = [K_red_0, K_red_1, K_identity, K_identity, K_red_0, K_red_1, K_identity, K_identity, K_red_8, K_red_9, K_red_10, K_red_10, K_red_12, K_red_13, K_red_14, K_red_14]\nK_green_2 = np.zeros((5, 5))\nK_green_2[2, :] = [-3, 13, 0, 0, 2]\nK_green_2[:, 2] = [-3, 13, 0, 0, 2]",
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "K_red_13",
"kind": 5,
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"peekOfCode": "K_red_13 = np.zeros((5, 5))\nK_red_13[3:, 3:] = np.array([[9, -1], [-1, -1]]) / 6\nK_red_14 = np.zeros((5, 5))\nK_red_14[:, 2] = np.array([2, 0, 0, 13, -3]) / 12\nK_list_red = [K_red_0, K_red_1, K_identity, K_identity, K_red_0, K_red_1, K_identity, K_identity, K_red_8, K_red_9, K_red_10, K_red_10, K_red_12, K_red_13, K_red_14, K_red_14]\nK_green_2 = np.zeros((5, 5))\nK_green_2[2, :] = [-3, 13, 0, 0, 2]\nK_green_2[:, 2] = [-3, 13, 0, 0, 2]\nK_green_2 = K_green_2 / 24\nK_green_3 = np.zeros((5, 5))",
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "K_red_14",
"kind": 5,
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"peekOfCode": "K_red_14 = np.zeros((5, 5))\nK_red_14[:, 2] = np.array([2, 0, 0, 13, -3]) / 12\nK_list_red = [K_red_0, K_red_1, K_identity, K_identity, K_red_0, K_red_1, K_identity, K_identity, K_red_8, K_red_9, K_red_10, K_red_10, K_red_12, K_red_13, K_red_14, K_red_14]\nK_green_2 = np.zeros((5, 5))\nK_green_2[2, :] = [-3, 13, 0, 0, 2]\nK_green_2[:, 2] = [-3, 13, 0, 0, 2]\nK_green_2 = K_green_2 / 24\nK_green_3 = np.zeros((5, 5))\nK_green_3[2, :] = [2, 0, 0, 13, -3]\nK_green_3[:, 2] = [-3, 13, 0, 0, 2]",
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "K_list_red",
"kind": 5,
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"peekOfCode": "K_list_red = [K_red_0, K_red_1, K_identity, K_identity, K_red_0, K_red_1, K_identity, K_identity, K_red_8, K_red_9, K_red_10, K_red_10, K_red_12, K_red_13, K_red_14, K_red_14]\nK_green_2 = np.zeros((5, 5))\nK_green_2[2, :] = [-3, 13, 0, 0, 2]\nK_green_2[:, 2] = [-3, 13, 0, 0, 2]\nK_green_2 = K_green_2 / 24\nK_green_3 = np.zeros((5, 5))\nK_green_3[2, :] = [2, 0, 0, 13, -3]\nK_green_3[:, 2] = [-3, 13, 0, 0, 2]\nK_green_3 = K_green_3 / 24\nK_green_6 = np.zeros((5, 5))",
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "K_green_2",
"kind": 5,
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"peekOfCode": "K_green_2 = np.zeros((5, 5))\nK_green_2[2, :] = [-3, 13, 0, 0, 2]\nK_green_2[:, 2] = [-3, 13, 0, 0, 2]\nK_green_2 = K_green_2 / 24\nK_green_3 = np.zeros((5, 5))\nK_green_3[2, :] = [2, 0, 0, 13, -3]\nK_green_3[:, 2] = [-3, 13, 0, 0, 2]\nK_green_3 = K_green_3 / 24\nK_green_6 = np.zeros((5, 5))\nK_green_6[2, :] = [-3, 13, 0, 0, 2]",
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "K_green_2",
"kind": 5,
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"peekOfCode": "K_green_2 = K_green_2 / 24\nK_green_3 = np.zeros((5, 5))\nK_green_3[2, :] = [2, 0, 0, 13, -3]\nK_green_3[:, 2] = [-3, 13, 0, 0, 2]\nK_green_3 = K_green_3 / 24\nK_green_6 = np.zeros((5, 5))\nK_green_6[2, :] = [-3, 13, 0, 0, 2]\nK_green_6[:, 2] = [2, 0, 0, 13, -3]\nK_green_6 = K_green_6 / 24\nK_green_7 = np.zeros((5, 5))",
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "K_green_3",
"kind": 5,
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"peekOfCode": "K_green_3 = np.zeros((5, 5))\nK_green_3[2, :] = [2, 0, 0, 13, -3]\nK_green_3[:, 2] = [-3, 13, 0, 0, 2]\nK_green_3 = K_green_3 / 24\nK_green_6 = np.zeros((5, 5))\nK_green_6[2, :] = [-3, 13, 0, 0, 2]\nK_green_6[:, 2] = [2, 0, 0, 13, -3]\nK_green_6 = K_green_6 / 24\nK_green_7 = np.zeros((5, 5))\nK_green_7[2, :] = [2, 0, 0, 13, -3]",
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "K_green_3",
"kind": 5,
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"peekOfCode": "K_green_3 = K_green_3 / 24\nK_green_6 = np.zeros((5, 5))\nK_green_6[2, :] = [-3, 13, 0, 0, 2]\nK_green_6[:, 2] = [2, 0, 0, 13, -3]\nK_green_6 = K_green_6 / 24\nK_green_7 = np.zeros((5, 5))\nK_green_7[2, :] = [2, 0, 0, 13, -3]\nK_green_7[:, 2] = [2, 0, 0, 13, -3]\nK_green_7 = K_green_7 / 24\nK_list_green = [K_identity, K_identity, K_green_2, K_green_3, K_identity, K_identity, K_green_6, K_green_7, K_green_2, K_green_3, K_identity, K_identity, K_green_6, K_green_7, K_identity, K_identity]",
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "K_green_6",
"kind": 5,
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"peekOfCode": "K_green_6 = np.zeros((5, 5))\nK_green_6[2, :] = [-3, 13, 0, 0, 2]\nK_green_6[:, 2] = [2, 0, 0, 13, -3]\nK_green_6 = K_green_6 / 24\nK_green_7 = np.zeros((5, 5))\nK_green_7[2, :] = [2, 0, 0, 13, -3]\nK_green_7[:, 2] = [2, 0, 0, 13, -3]\nK_green_7 = K_green_7 / 24\nK_list_green = [K_identity, K_identity, K_green_2, K_green_3, K_identity, K_identity, K_green_6, K_green_7, K_green_2, K_green_3, K_identity, K_identity, K_green_6, K_green_7, K_identity, K_identity]\nK_list_blue = [K_red_10, K_red_10, K_red_8, K_red_9, K_red_14, K_red_14, K_red_12, K_red_13, K_identity, K_identity, K_red_0, K_red_1, K_identity, K_identity, K_red_0, K_red_1]",
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "K_green_6",
"kind": 5,
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"peekOfCode": "K_green_6 = K_green_6 / 24\nK_green_7 = np.zeros((5, 5))\nK_green_7[2, :] = [2, 0, 0, 13, -3]\nK_green_7[:, 2] = [2, 0, 0, 13, -3]\nK_green_7 = K_green_7 / 24\nK_list_green = [K_identity, K_identity, K_green_2, K_green_3, K_identity, K_identity, K_green_6, K_green_7, K_green_2, K_green_3, K_identity, K_identity, K_green_6, K_green_7, K_identity, K_identity]\nK_list_blue = [K_red_10, K_red_10, K_red_8, K_red_9, K_red_14, K_red_14, K_red_12, K_red_13, K_identity, K_identity, K_red_0, K_red_1, K_identity, K_identity, K_red_0, K_red_1]\nker_bayer_red_blue = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]]) / 4\nker_bayer_green = np.array([[0, 1, 0], [1, 4, 1], [0, 1, 0]]) / 4\n####",
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "K_green_7",
"kind": 5,
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"peekOfCode": "K_green_7 = np.zeros((5, 5))\nK_green_7[2, :] = [2, 0, 0, 13, -3]\nK_green_7[:, 2] = [2, 0, 0, 13, -3]\nK_green_7 = K_green_7 / 24\nK_list_green = [K_identity, K_identity, K_green_2, K_green_3, K_identity, K_identity, K_green_6, K_green_7, K_green_2, K_green_3, K_identity, K_identity, K_green_6, K_green_7, K_identity, K_identity]\nK_list_blue = [K_red_10, K_red_10, K_red_8, K_red_9, K_red_14, K_red_14, K_red_12, K_red_13, K_identity, K_identity, K_red_0, K_red_1, K_identity, K_identity, K_red_0, K_red_1]\nker_bayer_red_blue = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]]) / 4\nker_bayer_green = np.array([[0, 1, 0], [1, 4, 1], [0, 1, 0]]) / 4\n####\n####",
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "K_green_7",
"kind": 5,
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"peekOfCode": "K_green_7 = K_green_7 / 24\nK_list_green = [K_identity, K_identity, K_green_2, K_green_3, K_identity, K_identity, K_green_6, K_green_7, K_green_2, K_green_3, K_identity, K_identity, K_green_6, K_green_7, K_identity, K_identity]\nK_list_blue = [K_red_10, K_red_10, K_red_8, K_red_9, K_red_14, K_red_14, K_red_12, K_red_13, K_identity, K_identity, K_red_0, K_red_1, K_identity, K_identity, K_red_0, K_red_1]\nker_bayer_red_blue = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]]) / 4\nker_bayer_green = np.array([[0, 1, 0], [1, 4, 1], [0, 1, 0]]) / 4\n####\n####\n####\n#### #### #### #############\n#### ###### #### ##################",
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "K_list_green",
"kind": 5,
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"peekOfCode": "K_list_green = [K_identity, K_identity, K_green_2, K_green_3, K_identity, K_identity, K_green_6, K_green_7, K_green_2, K_green_3, K_identity, K_identity, K_green_6, K_green_7, K_identity, K_identity]\nK_list_blue = [K_red_10, K_red_10, K_red_8, K_red_9, K_red_14, K_red_14, K_red_12, K_red_13, K_identity, K_identity, K_red_0, K_red_1, K_identity, K_identity, K_red_0, K_red_1]\nker_bayer_red_blue = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]]) / 4\nker_bayer_green = np.array([[0, 1, 0], [1, 4, 1], [0, 1, 0]]) / 4\n####\n####\n####\n#### #### #### #############\n#### ###### #### ##################\n#### ######## #### ####################",
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "K_list_blue",
"kind": 5,
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"peekOfCode": "K_list_blue = [K_red_10, K_red_10, K_red_8, K_red_9, K_red_14, K_red_14, K_red_12, K_red_13, K_identity, K_identity, K_red_0, K_red_1, K_identity, K_identity, K_red_0, K_red_1]\nker_bayer_red_blue = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]]) / 4\nker_bayer_green = np.array([[0, 1, 0], [1, 4, 1], [0, 1, 0]]) / 4\n####\n####\n####\n#### #### #### #############\n#### ###### #### ##################\n#### ######## #### ####################\n#### ########## #### #### ########",
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "ker_bayer_red_blue",
"kind": 5,
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"peekOfCode": "ker_bayer_red_blue = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]]) / 4\nker_bayer_green = np.array([[0, 1, 0], [1, 4, 1], [0, 1, 0]]) / 4\n####\n####\n####\n#### #### #### #############\n#### ###### #### ##################\n#### ######## #### ####################\n#### ########## #### #### ########\n#### ############ #### #### ####",
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "ker_bayer_green",
"kind": 5,
"importPath": "src.methods.baseline.demo_reconstruction",
"description": "src.methods.baseline.demo_reconstruction",
"peekOfCode": "ker_bayer_green = np.array([[0, 1, 0], [1, 4, 1], [0, 1, 0]]) / 4\n####\n####\n####\n#### #### #### #############\n#### ###### #### ##################\n#### ######## #### ####################\n#### ########## #### #### ########\n#### ############ #### #### ####\n#### #### ######## #### #### ####",
"detail": "src.methods.baseline.demo_reconstruction",
"documentation": {}
},
{
"label": "run_reconstruction",
"kind": 2,
"importPath": "src.methods.baseline.reconstruct",
"description": "src.methods.baseline.reconstruct",
"peekOfCode": "def run_reconstruction(y: np.ndarray, cfa: str) -> np.ndarray:\n \"\"\"Performs demosaicking on y.\n Args:\n y (np.ndarray): Mosaicked image to be reconstructed.\n cfa (str): Name of the CFA. Can be bayer or quad_bayer.\n Returns:\n np.ndarray: Demosaicked image.\n \"\"\"\n input_shape = (y.shape[0], y.shape[1], 3)\n op = CFA(cfa, input_shape)",
"detail": "src.methods.baseline.reconstruct",
"documentation": {}
},
{
"label": "find_Knearest_neighbors",
"kind": 2,
"importPath": "src.methods.ELAMRANI_Mouna.functions",
"description": "src.methods.ELAMRANI_Mouna.functions",
"peekOfCode": "def find_Knearest_neighbors(z, chan, i, j, N, M):\n \"\"\"Finds all the neighbors of a pixel on a given channel\"\"\"\n return np.array([z[(i+di)%N, (j+dj)%M, chan] for di in range(-1, 2) for dj in range(-1, 2)])\ndef calculate_directional_gradients(neighbors):\n \"\"\"Calculates the directional derivative of a pixel\"\"\"\n P1, P2, P3, P4, P5, P6, P7, P8, P9 = neighbors\n Dx, Dy = (P4 - P6)/2, (P2 - P8)/2\n Dxd, Dyd = (P3 - P7)/(2*np.sqrt(2)), (P1 - P9)/(2*np.sqrt(2))\n return [Dx, Dy, Dxd, Dyd]\ndef calculate_adaptive_weights(z, neigh, dir_deriv,chan,i,j,N,M):",
"detail": "src.methods.ELAMRANI_Mouna.functions",
"documentation": {}
},
{
"label": "calculate_directional_gradients",
"kind": 2,
"importPath": "src.methods.ELAMRANI_Mouna.functions",
"description": "src.methods.ELAMRANI_Mouna.functions",
"peekOfCode": "def calculate_directional_gradients(neighbors):\n \"\"\"Calculates the directional derivative of a pixel\"\"\"\n P1, P2, P3, P4, P5, P6, P7, P8, P9 = neighbors\n Dx, Dy = (P4 - P6)/2, (P2 - P8)/2\n Dxd, Dyd = (P3 - P7)/(2*np.sqrt(2)), (P1 - P9)/(2*np.sqrt(2))\n return [Dx, Dy, Dxd, Dyd]\ndef calculate_adaptive_weights(z, neigh, dir_deriv,chan,i,j,N,M):\n \"\"\"Finds all the neighbors of a pixel on a given channel\"\"\"\n [Dx,Dy,Dxd,Dyd] = dir_deriv\n [P1,P2,P3,P4,P5,P6,P7,P8,P9] = neigh",
"detail": "src.methods.ELAMRANI_Mouna.functions",
"documentation": {}
},
{
"label": "calculate_adaptive_weights",
"kind": 2,
"importPath": "src.methods.ELAMRANI_Mouna.functions",
"description": "src.methods.ELAMRANI_Mouna.functions",
"peekOfCode": "def calculate_adaptive_weights(z, neigh, dir_deriv,chan,i,j,N,M):\n \"\"\"Finds all the neighbors of a pixel on a given channel\"\"\"\n [Dx,Dy,Dxd,Dyd] = dir_deriv\n [P1,P2,P3,P4,P5,P6,P7,P8,P9] = neigh\n E = []\n c = 1\n for k in range (-1,2):\n for k in range (-1,2):\n n = find_Knearest_neighbors(z,chan,i+k,j+k,N,M)\n dd = calculate_directional_gradients(n)",
"detail": "src.methods.ELAMRANI_Mouna.functions",
"documentation": {}
},
{
"label": "interpolate_pixel",
"kind": 2,
"importPath": "src.methods.ELAMRANI_Mouna.functions",
"description": "src.methods.ELAMRANI_Mouna.functions",
"peekOfCode": "def interpolate_pixel(neigh,weights):\n \"\"\"interpolates pixels from a grid where one of two pixels is missing regularly spaced\"\"\"\n [P1,P2,P3,P4,P5,P6,P7,P8,P9] = neigh\n [E1,E2,E3,E4,E6,E7,E8,E9] = weights\n num5 = E2*P2 + E4*P4 + E6*P6 + E8*P8\n den5 = E2 + E4 + E6 + E8\n I5 = num5/den5\n return I5\ndef interpolate_RedBlue(neighbors, neighbors_G, weights):\n \"\"\"Interpolates the central missing pixel from the red or blue channel from a Bayer pattern.\"\"\"",
"detail": "src.methods.ELAMRANI_Mouna.functions",
"documentation": {}
},
{
"label": "interpolate_RedBlue",
"kind": 2,
"importPath": "src.methods.ELAMRANI_Mouna.functions",
"description": "src.methods.ELAMRANI_Mouna.functions",
"peekOfCode": "def interpolate_RedBlue(neighbors, neighbors_G, weights):\n \"\"\"Interpolates the central missing pixel from the red or blue channel from a Bayer pattern.\"\"\"\n [P1,P2,P3,P4,P5,P6,P7,P8,P9] = neighbors\n [G1,G2,G3,G4,G5,G6,G7,G8,G9] = neighbors_G\n [E1,E2,E3,E4,E6,E7,E8,E9] = weights\n num5 = ((E1*P1)/G1) + ((E3*P3)/G3) + ((E7*P7)/G7) + ((E9*P9)/G9)\n den5 = E1 + E3 + E7 + E9\n I5 = G5 * num5/den5\n return I5",
"detail": "src.methods.ELAMRANI_Mouna.functions",
"documentation": {}
},
{
"label": "run_reconstruction",
"kind": 2,
"importPath": "src.methods.ELAMRANI_Mouna.reconstruct",
"description": "src.methods.ELAMRANI_Mouna.reconstruct",
"peekOfCode": "def run_reconstruction(y: np.ndarray, cfa: str) -> np.ndarray:\n \"\"\"Performs demosaicking on y.\n Args:\n y (np.ndarray): Mosaicked image to be reconstructed.\n cfa (str): Name of the CFA. Can be bayer or quad_bayer.\n Returns:\n np.ndarray: Demosaicked image.\n \"\"\"\n # Define constants and operators\n cfa_name = 'bayer' # bayer or quad_bayer",
"detail": "src.methods.ELAMRANI_Mouna.reconstruct",
"documentation": {}
},
{
"label": "check_path",
"kind": 2,
"importPath": "src.checks",
"description": "src.checks",
"peekOfCode": "def check_path(file_path: str) -> None:\n \"\"\"Checks if a file exists at file_path and is a png image.\n Args:\n file_path (str): Path to check\n Raises:\n Exception: Exception if the path is invalid.\n \"\"\"\n if not exists(file_path):\n raise Exception('File does not exist.')\ndef check_png(file_path: str) -> None:",
"detail": "src.checks",
"documentation": {}
},
{
"label": "check_png",
"kind": 2,
"importPath": "src.checks",
"description": "src.checks",
"peekOfCode": "def check_png(file_path: str) -> None:\n \"\"\"Checks if the file is a png image.\n Args:\n file_path (str): Path to check.\n \"\"\"\n if not file_path.endswith('.png'):\n raise Exception(f'Path must end with \".png\". Got {file_path[-4:]}.')\ndef check_rgb(img: np.ndarray) -> None:\n \"\"\"Checks if image is a 3 dimensional array with 3 channels.\n Args:",
"detail": "src.checks",
"documentation": {}
},
{
"label": "check_rgb",
"kind": 2,
"importPath": "src.checks",
"description": "src.checks",
"peekOfCode": "def check_rgb(img: np.ndarray) -> None:\n \"\"\"Checks if image is a 3 dimensional array with 3 channels.\n Args:\n img (np.ndarray): Image to check.\n Raises:\n Exception: Exception if image is not a 3 dimensional array with 3 channels.\n \"\"\"\n if not (len(img.shape) == 3 and img.shape[2] == 3):\n raise Exception(f'The images must be 3 dimensional (RGB) arrays. Got an array of shape {img.shape}.')\ndef check_shape(img1: np.ndarray, img2: np.ndarray) -> None:",
"detail": "src.checks",
"documentation": {}
},
{
"label": "check_shape",
"kind": 2,
"importPath": "src.checks",
"description": "src.checks",
"peekOfCode": "def check_shape(img1: np.ndarray, img2: np.ndarray) -> None:\n \"\"\"Checks if img1 and img2 have the same shape.\n Args:\n img1 (np.ndarray): First image.\n img2 (np.ndarray): Second image.\n Raises:\n Exception: Exception if img1 and img2 do not have the same shape.\n \"\"\"\n if img1.shape != img2.shape:\n raise Exception(f'The images must have the same shape. Got {img1.shape} and {img2.shape}.')",
"detail": "src.checks",
"documentation": {}
},
{
"label": "check_data_range",
"kind": 2,
"importPath": "src.checks",
"description": "src.checks",
"peekOfCode": "def check_data_range(img: np.ndarray) -> None:\n \"\"\"Checks if the values of img are in the interval [0, 1].\n Args:\n img (np.ndarray): Image to check.\n Raises:\n Exception: Exception if img's values are not in [0, 1].\n \"\"\"\n if np.max(img) > 1 or np.min(img) < 0:\n raise Exception(f'Pixel\\'s values must be in range [0, 1]. Got range [{np.min(img)}, {np.max(img)}].')\ndef check_cfa(cfa: str) -> None:",
"detail": "src.checks",
"documentation": {}
},
{
"label": "check_cfa",
"kind": 2,
"importPath": "src.checks",
"description": "src.checks",
"peekOfCode": "def check_cfa(cfa: str) -> None:\n \"\"\"Checks if the CFA's name is correct.\n Args:\n cfa (str): CFA name.\n Raises:\n Exception: Exception if the name of the CFA is not correct.\n \"\"\"\n if cfa not in ['bayer', 'quad_bayer']:\n raise Exception(f'Unknown CFA name. Got {cfa} but expected either bayer or quad_bayer.')\n####",
"detail": "src.checks",
"documentation": {}
},
{
"label": "CFA",
"kind": 6,
"importPath": "src.forward_model",
"description": "src.forward_model",
"peekOfCode": "class CFA():\n def __init__(self, cfa: str, input_shape: tuple) -> None:\n \"\"\"Constructor of the forward operator's class.\n Args:\n cfa (str): Name of the pattern. Either bayer or quad_bayer.\n input_shape (tuple): Shape of the input images of the operator.\n \"\"\"\n check_cfa(cfa)\n self.cfa = cfa\n self.input_shape = input_shape",
"detail": "src.forward_model",
"documentation": {}
},
{
"label": "get_bayer_mask",
"kind": 2,
"importPath": "src.forward_model",
"description": "src.forward_model",
"peekOfCode": "def get_bayer_mask(input_shape: tuple) -> np.ndarray:\n \"\"\"Return the mask of the Bayer CFA.\n Args:\n input_shape (tuple): Shape of the mask.\n Returns:\n np.ndarray: Mask.\n \"\"\"\n res = np.kron(np.ones((input_shape[0], input_shape[1], 1)), [0, 1, 0])\n res[::2, 1::2] = [1, 0, 0]\n res[1::2, ::2] = [0, 0, 1]",
"detail": "src.forward_model",
"documentation": {}
},
{
"label": "get_quad_bayer_mask",
"kind": 2,
"importPath": "src.forward_model",
"description": "src.forward_model",
"peekOfCode": "def get_quad_bayer_mask(input_shape: tuple) -> np.ndarray:\n \"\"\"Return the mask of the quad_bayer CFA.\n Args:\n input_shape (tuple): Shape of the mask.\n Returns:\n np.ndarray: Mask.\n \"\"\"\n res = np.kron(np.ones((input_shape[0], input_shape[1], 1)), [0, 1, 0])\n res[::4, 2::4] = [1, 0, 0]\n res[::4, 3::4] = [1, 0, 0]",
"detail": "src.forward_model",
"documentation": {}
},
{
"label": "normalise_image",
"kind": 2,
"importPath": "src.utils",
"description": "src.utils",
"peekOfCode": "def normalise_image(img: np.ndarray) -> np.ndarray:\n \"\"\"Normalise the values of img in the interval [0, 1].\n Args:\n img (np.ndarray): Image to normalise.\n Returns:\n np.ndarray: Normalised image.\n \"\"\"\n return (img - np.min(img)) / np.ptp(img)\ndef load_image(file_path: str) -> np.ndarray:\n \"\"\"Loads the image located in file_path.",
"detail": "src.utils",
"documentation": {}
},
{
"label": "load_image",
"kind": 2,
"importPath": "src.utils",
"description": "src.utils",
"peekOfCode": "def load_image(file_path: str) -> np.ndarray:\n \"\"\"Loads the image located in file_path.\n Args:\n file_path (str): Path to the file containing the image. Must end by '.png'.\n Returns:\n np.ndarray: Loaded image.\n \"\"\"\n check_path(file_path)\n check_png(file_path)\n return normalise_image(imread(file_path))",
"detail": "src.utils",
"documentation": {}
},
{
"label": "save_image",
"kind": 2,
"importPath": "src.utils",
"description": "src.utils",
"peekOfCode": "def save_image(file_path: str, img: np.ndarray) -> None:\n \"\"\"Saves the image located in file_path.\n Args:\n file_path (str): Path to the file in which the image will be saved. Must end by '.png'.\n img (np.ndarray): Image to save.\n \"\"\"\n check_path(file_path.split('/')[-2])\n check_png(file_path)\n imsave(file_path, (img * 255).astype(np.uint8))\ndef psnr(img1: np.ndarray, img2: np.ndarray) -> float:",
"detail": "src.utils",
"documentation": {}
},
{
"label": "psnr",
"kind": 2,
"importPath": "src.utils",
"description": "src.utils",
"peekOfCode": "def psnr(img1: np.ndarray, img2: np.ndarray) -> float:\n \"\"\"Computes the PSNR between img1 and img2 after some sanity checks.\n img1 and img2 must:\n - have the same shape;\n - be in range [0, 1].\n Args:\n img1 (np.ndarray): First image.\n img2 (np.ndarray): Second image.\n Returns:\n float: PSNR between img1 and img2.",
"detail": "src.utils",
"documentation": {}
},
{
"label": "ssim",
"kind": 2,
"importPath": "src.utils",
"description": "src.utils",
"peekOfCode": "def ssim(img1: np.ndarray, img2: np.ndarray) -> float:\n \"\"\"Computes the SSIM between img1 and img2 after some sanity checks.\n img1 and img2 must:\n - have the same shape;\n - be in range [0, 1];\n - be 3 dimensional array with 3 channels.\n Args:\n img1 (np.ndarray): First image.\n img2 (np.ndarray): Second image.\n Returns:",
"detail": "src.utils",
"documentation": {}
}
]
\ No newline at end of file
File mode changed from 100755 to 100644
File mode changed from 100755 to 100644
File added
from scipy import ndimage
import numpy as np
############################################################
def color_pixel(i, j, cfa="bayer"):
    """Return the CFA color ('green', 'red' or 'blue') sampled at pixel (i, j).

    For a quad-Bayer pattern each 2x2 cell shares one color, so coordinates
    are first collapsed onto the underlying Bayer grid.
    """
    if cfa == "quad_bayer":
        i, j = i // 2, j // 2
    # Green lives on the checkerboard of even (i + j); odd rows carry blue.
    if (i + j) % 2 == 0:
        return 'green'
    return 'red' if i % 2 == 0 else 'blue'
def rmse_pixel(pixel_raw, pixel_extrapolate):
    """Root-mean-square error between a reference pixel/patch and its estimate."""
    diff = np.asarray(pixel_raw) - np.asarray(pixel_extrapolate)
    return np.sqrt(np.mean(np.square(diff)))
######### Method extrapolation with edge detection #########
def compute_orientation_matrix(img_raw):
    """Per-pixel edge-orientation map.

    Returns an array of the same shape as `img_raw` holding 1.0 where the
    vertical Sobel response is strictly smaller than the horizontal one,
    and 0.0 elsewhere.
    """
    grad_v = ndimage.sobel(img_raw, 0)
    grad_h = ndimage.sobel(img_raw, 1)
    return np.where(grad_v < grad_h, 1.0, 0.0)
## Green Channel ##
##Formulas for etrapolation of pixels:
# Directional one-sided estimators for a missing green sample at (i, j).
# Each combines the nearest same-column/row green neighbor with a gradient
# correction taken two and three pixels away in the same direction.
def extrapolate_green_top(img_raw,i,j):
    """Estimate green at (i, j) from the three pixels directly above."""
    return img_raw[i-1,j] + 3/4*(img_raw[i,j]-img_raw[i-2,j])-1/4*(img_raw[i-1,j]-img_raw[i-3,j])
def extrapolate_green_bottom(img_raw,i,j):
    """Estimate green at (i, j) from the three pixels directly below."""
    return img_raw[i+1,j] + 3/4*(img_raw[i,j]-img_raw[i+2,j])-1/4*(img_raw[i+1,j]-img_raw[i+3,j])
def extrapolate_green_left(img_raw,i,j):
    """Estimate green at (i, j) from the three pixels to the left."""
    return img_raw[i,j-1] + 3/4*(img_raw[i,j]-img_raw[i,j-2])-1/4*(img_raw[i,j-1]-img_raw[i,j-3])
def extrapolate_green_right(img_raw,i,j):
    """Estimate green at (i, j) from the three pixels to the right."""
    return img_raw[i,j+1] + 3/4*(img_raw[i,j]-img_raw[i,j+2])-1/4*(img_raw[i,j+1]-img_raw[i,j+3])
## Extrapolation method:
def median_extrapolate_green_pixel(img_raw, i, j, orientations_to_drop):
    """Median of the directional green estimates at (i, j).

    Directions named in `orientations_to_drop` are excluded from the median.
    """
    estimators = {
        "top": extrapolate_green_top,
        "bottom": extrapolate_green_bottom,
        "left": extrapolate_green_left,
        "right": extrapolate_green_right,
    }
    candidates = [estimate(img_raw, i, j)
                  for direction, estimate in estimators.items()
                  if direction not in orientations_to_drop]
    return np.median(candidates)
def extrapolate_green_pixel(img_raw,i,j,orientation):
    """Estimate the missing green value at (i, j).

    Near the image borders, directions whose 3-pixel stencil would fall
    outside the image are dropped; elsewhere, one direction on the axis
    selected by `orientation` (from compute_orientation_matrix) is pruned
    based on which estimate has the larger RMSE against the raw value.
    """
    # First the borders:
    orientations_to_drop = []
    if (i<2):
        orientations_to_drop.append('top')
    if (i>img_raw.shape[0]-4):
        orientations_to_drop.append('bottom')
    if (j<2):
        orientations_to_drop.append('left')
    if (j>img_raw.shape[1]-4):
        orientations_to_drop.append('right')
    # Then the rest of the image:
    # NOTE(review): this `else` binds only to the right-border `if` above, so
    # the orientation-based pruning is skipped only near the right edge, not
    # near the other three borders — confirm this is intentional.
    else:
        if (orientation == 1): # V < H so we gonna eliminate one horizontal pixel.
            if ("right" not in orientations_to_drop and "left" not in orientations_to_drop):
                rmse_pixel_left = rmse_pixel(img_raw[i,j],extrapolate_green_left(img_raw,i,j))
                rmse_pixel_right = rmse_pixel(img_raw[i,j],extrapolate_green_right(img_raw,i,j))
                if (rmse_pixel_left > rmse_pixel_right):
                    orientations_to_drop.append('left')
                else:
                    orientations_to_drop.append('right')
        else: # V > H so we gonna eliminate one vertical pixel.
            if ("top" not in orientations_to_drop and "bottom" not in orientations_to_drop):
                rmse_pixel_top = rmse_pixel(img_raw[i,j],extrapolate_green_top(img_raw,i,j))
                rmse_pixel_bottom = rmse_pixel(img_raw[i,j],extrapolate_green_bottom(img_raw,i,j))
                if (rmse_pixel_top > rmse_pixel_bottom):
                    orientations_to_drop.append('top')
                else:
                    orientations_to_drop.append('bottom')
    return median_extrapolate_green_pixel(img_raw,i,j,orientations_to_drop)
def extrapolate_green(img_raw,extrapolate_img):
    """Fill channel 1 (green) of `extrapolate_img` from the raw Bayer mosaic.

    Green CFA samples are copied through; the others are interpolated with
    the edge-directed median estimator. Mutates and returns `extrapolate_img`.
    """
    orientation_matrix = compute_orientation_matrix(img_raw)
    for i in range(img_raw.shape[0]):
        for j in range(img_raw.shape[1]):
            if (color_pixel(i,j)!= "green"):
                extrapolate_img[i,j,1] = extrapolate_green_pixel(img_raw,i,j,orientation_matrix[i,j])
            else:
                extrapolate_img[i,j,1] = img_raw[i,j]
    return extrapolate_img
## Red and Blue Channels ##
# Directional red/blue estimators. Axial variants use the raw value plus the
# green-plane difference (constant color-difference assumption); diagonal
# variants use the already-reconstructed green plane (channel 1) instead.
def extrapolate_top(img_raw,img_extrapolate,i,j):
    """Axial estimate from the pixel above."""
    return (img_raw[i-1,j] + img_raw[i,j]-img_extrapolate[i-1,j,1])
def extrapolate_left(img_raw,img_extrapolate,i,j):
    """Axial estimate from the pixel to the left."""
    return (img_raw[i,j-1] + img_raw[i,j]-img_extrapolate[i,j-1,1])
def extrapolate_right(img_raw,img_extrapolate,i,j):
    """Axial estimate from the pixel to the right."""
    return (img_raw[i,j+1] + img_raw[i,j]-img_extrapolate[i,j+1,1])
def extrapolate_bottom(img_raw,img_extrapolate,i,j):
    """Axial estimate from the pixel below."""
    return (img_raw[i+1,j] + img_raw[i,j]-img_extrapolate[i+1,j,1])
def extrapolate_top_left(img_raw,img_extrapolate,i,j):
    """Diagonal estimate from the top-left neighbor, green-difference corrected."""
    return (img_raw[i-1,j-1] + img_extrapolate[i,j,1]-img_extrapolate[i-1,j-1,1])
def extrapolate_top_right(img_raw,img_extrapolate,i,j):
    """Diagonal estimate from the top-right neighbor, green-difference corrected."""
    return (img_raw[i-1,j+1] + img_extrapolate[i,j,1]-img_extrapolate[i-1,j+1,1])
def extrapolate_bottom_left(img_raw,img_extrapolate,i,j):
    """Diagonal estimate from the bottom-left neighbor, green-difference corrected."""
    return (img_raw[i+1,j-1] + img_extrapolate[i,j,1]-img_extrapolate[i+1,j-1,1])
def extrapolate_bottom_right(img_raw,img_extrapolate,i,j):
    """Diagonal estimate from the bottom-right neighbor, green-difference corrected."""
    return (img_raw[i+1,j+1] + img_extrapolate[i,j,1]-img_extrapolate[i+1,j+1,1])
def median_pixel(img_raw,img_extrapolate,i,j,orientations_to_drop):
    """Median of the directional red/blue estimates at (i, j).

    Diagonal estimators are used at non-green CFA sites, axial estimators at
    green sites; directions in `orientations_to_drop` are excluded.
    """
    list_extrapolate = []
    if (color_pixel(i,j) != "green"):
        if("top_left" not in orientations_to_drop):
            list_extrapolate.append(extrapolate_top_left(img_raw,img_extrapolate,i,j))
        if("top_right" not in orientations_to_drop):
            list_extrapolate.append(extrapolate_top_right(img_raw,img_extrapolate,i,j))
        if("bottom_left" not in orientations_to_drop):
            list_extrapolate.append(extrapolate_bottom_left(img_raw,img_extrapolate,i,j))
        if("bottom_right" not in orientations_to_drop):
            list_extrapolate.append(extrapolate_bottom_right(img_raw,img_extrapolate,i,j))
    elif (color_pixel(i,j) == "green"):
        if("top" not in orientations_to_drop):
            list_extrapolate.append(extrapolate_top(img_raw,img_extrapolate,i,j))
        if("left" not in orientations_to_drop):
            list_extrapolate.append(extrapolate_left(img_raw,img_extrapolate,i,j))
        if("right" not in orientations_to_drop):
            list_extrapolate.append(extrapolate_right(img_raw,img_extrapolate,i,j))
        if("bottom" not in orientations_to_drop):
            list_extrapolate.append(extrapolate_bottom(img_raw,img_extrapolate,i,j))
    return np.median(list_extrapolate)
def extrapolate_pixel(img_raw,img_extrapolate,i,j,color):
    """Estimate the missing `color` ('red' or 'blue') value at (i, j).

    Drops directions whose stencil falls outside the image, then prunes the
    single worst direction (largest RMSE against the raw sample) before
    taking the median of the remaining directional estimates.
    """
    orientations_to_drop = []
    if (color_pixel(i,j)!='green'):
        # Non-green CFA site: diagonal neighbors carry the wanted color.
        if (i<1):
            orientations_to_drop.append("top_left")
            orientations_to_drop.append("top_right")
        if (i>img_raw.shape[0]-2):
            orientations_to_drop.append("bottom_left")
            orientations_to_drop.append("bottom_right")
        if (j<1):
            orientations_to_drop.append("top_left")
            orientations_to_drop.append("bottom_left")
        if (j>img_raw.shape[1]-2):
            orientations_to_drop.append("top_right")
            orientations_to_drop.append("bottom_right")
        if ("top_left" not in orientations_to_drop and "top_right" not in orientations_to_drop and "bottom_left" not in orientations_to_drop and "bottom_right" not in orientations_to_drop):
            rmse_top_left = rmse_pixel(img_raw[i,j],extrapolate_top_left(img_raw,img_extrapolate,i,j))
            rmse_top_right = rmse_pixel(img_raw[i,j],extrapolate_top_right(img_raw,img_extrapolate,i,j))
            rmse_bottom_left = rmse_pixel(img_raw[i,j],extrapolate_bottom_left(img_raw,img_extrapolate,i,j))
            rmse_bottom_right = rmse_pixel(img_raw[i,j],extrapolate_bottom_right(img_raw,img_extrapolate,i,j))
            # Drop the diagonal with the largest error.
            if (rmse_bottom_left> rmse_bottom_right and rmse_bottom_left> rmse_top_left and rmse_bottom_left> rmse_top_right):
                orientations_to_drop.append("bottom_left")
            elif (rmse_bottom_right> rmse_bottom_left and rmse_bottom_right> rmse_top_left and rmse_bottom_right> rmse_top_right):
                orientations_to_drop.append("bottom_right")
            elif (rmse_top_left> rmse_bottom_left and rmse_top_left> rmse_bottom_right and rmse_top_left> rmse_top_right):
                orientations_to_drop.append("top_left")
            else:
                orientations_to_drop.append("top_right")
    elif(color_pixel(i,j)=="green"):
        # Green CFA site: the wanted color lies on one axis only.
        if (i<1):
            orientations_to_drop.append("top")
        if (i>img_raw.shape[0]-2):
            orientations_to_drop.append("bottom")
        if (j<1):
            orientations_to_drop.append("left")
        if (j>img_raw.shape[1]-2):
            orientations_to_drop.append("right")
        # Row parity decides whether red (resp. blue) neighbors are horizontal
        # or vertical for this green pixel.
        if ((i%2!=0 and color == "red") or (i%2==0 and color == "blue")):
            if ("right" not in orientations_to_drop and "left" not in orientations_to_drop):
                rmse_pixel_left = rmse_pixel(img_raw[i,j],extrapolate_left(img_raw,img_extrapolate,i,j))
                rmse_pixel_right = rmse_pixel(img_raw[i,j],extrapolate_right(img_raw,img_extrapolate,i,j))
                if (rmse_pixel_left > rmse_pixel_right):
                    orientations_to_drop.append('left')
                else:
                    orientations_to_drop.append('right')
        else:
            if ("top" not in orientations_to_drop and "bottom" not in orientations_to_drop):
                rmse_pixel_top = rmse_pixel(img_raw[i,j],extrapolate_top(img_raw,img_extrapolate,i,j))
                rmse_pixel_bottom = rmse_pixel(img_raw[i,j],extrapolate_bottom(img_raw,img_extrapolate,i,j))
                if (rmse_pixel_top > rmse_pixel_bottom):
                    orientations_to_drop.append('top')
                else:
                    orientations_to_drop.append('bottom')
    return median_pixel(img_raw,img_extrapolate,i,j,orientations_to_drop)
def extrapolate_red(img_raw,img_extrapolate):
    """Fill channel 0 (red) in place: copy red CFA samples, interpolate the rest.

    Relies on the green plane (channel 1) being already reconstructed.
    """
    for i in range(img_raw.shape[0]):
        for j in range(img_raw.shape[1]):
            if (color_pixel(i,j)!="red"):
                img_extrapolate[i,j,0] = extrapolate_pixel(img_raw,img_extrapolate,i,j,"red")
            else:
                img_extrapolate[i,j,0] = img_raw[i,j]
def extrapolate_blue(img_raw,img_extrapolate):
    """Fill channel 2 (blue) in place: copy blue CFA samples, interpolate the rest.

    Relies on the green plane (channel 1) being already reconstructed.
    """
    for i in range(img_raw.shape[0]):
        for j in range(img_raw.shape[1]):
            if (color_pixel(i,j)!="blue"):
                img_extrapolate[i,j,2] = extrapolate_pixel(img_raw,img_extrapolate,i,j,"blue")
            else:
                img_extrapolate[i,j,2] = img_raw[i,j]
def extrapolate_img(img_cfa):
    """Demosaick a Bayer CFA image.

    Returns an (H, W, 3) RGB array: the green plane is reconstructed first
    because the red/blue interpolation reads from it.
    """
    rgb = np.zeros(img_cfa.shape + (3,))
    extrapolate_green(img_cfa, rgb)
    extrapolate_red(img_cfa, rgb)
    extrapolate_blue(img_cfa, rgb)
    return rgb
#################################################
##QUAD BAYER
#################################################
## Green Channel ##
### Formulas extrapolation of pixels:
# Quad-Bayer versions of the directional green estimators: same formulas as
# the Bayer ones but with a stride of 2 (each "neighbor" is one 2x2 cell
# away), evaluated for each of the four pixels of the cell at (i, j).
def extrapolate_green_top_quad(img_raw,i,j):
    """(2, 2) green estimate for the cell at (i, j) from the cells above."""
    extrapolate_quad = np.zeros((2,2))
    for m in range(2):
        for n in range(2):
            extrapolate_quad[m,n] = img_raw[i+m-1*2,j+n] + 3/4*(img_raw[i+m,j+n]-img_raw[i+m-2*2,j+n])-1/4*(img_raw[i+m-1*2,j+n]-img_raw[i+m-3*2,j+n])
    return extrapolate_quad
def extrapolate_green_bottom_quad(img_raw,i,j):
    """(2, 2) green estimate for the cell at (i, j) from the cells below."""
    extrapolate_quad = np.zeros((2,2))
    for m in range(2):
        for n in range(2):
            extrapolate_quad[m,n] = img_raw[i+m+1*2,j+n] + 3/4*(img_raw[i+m,j+n]-img_raw[i+m+2*2,j+n])-1/4*(img_raw[i+m+1*2,j+n]-img_raw[i+m+3*2,j+n])
    return extrapolate_quad
def extrapolate_green_left_quad(img_raw,i,j):
    """(2, 2) green estimate for the cell at (i, j) from the cells to the left."""
    extrapolate_quad = np.zeros((2,2))
    for m in range(2):
        for n in range(2):
            extrapolate_quad[m,n] = img_raw[i+m,j+n-1*2] + 3/4*(img_raw[i+m,j+n]-img_raw[i+m,j+n-2*2])-1/4*(img_raw[i+m,j+n-1*2]-img_raw[i+m,j+n-3*2])
    return extrapolate_quad
def extrapolate_green_right_quad(img_raw,i,j):
    """(2, 2) green estimate for the cell at (i, j) from the cells to the right."""
    extrapolate_quad = np.zeros((2,2))
    for m in range(2):
        for n in range(2):
            extrapolate_quad[m,n] = img_raw[i+m,j+n+1*2] + 3/4*(img_raw[i+m,j+n]-img_raw[i+m,j+n+2*2])-1/4*(img_raw[i+m,j+n+1*2]-img_raw[i+m,j+n+3*2])
    return extrapolate_quad
### Extrapolation method:
def median_extrapolate_green_pixel_quad(img_raw,i,j,orientations_to_drop):
    """Per-pixel median of the directional green estimates for the 2x2 cell
    whose top-left corner is (i, j).

    Directions listed in `orientations_to_drop` are excluded. Returns a
    (2, 2) array.
    """
    list_extrapolate_pixel = []
    if ("top" not in orientations_to_drop):
        list_extrapolate_pixel.append(extrapolate_green_top_quad(img_raw,i,j))
    if ("bottom" not in orientations_to_drop):
        list_extrapolate_pixel.append(extrapolate_green_bottom_quad(img_raw,i,j))
    if("left" not in orientations_to_drop):
        list_extrapolate_pixel.append(extrapolate_green_left_quad(img_raw,i,j))
    if("right" not in orientations_to_drop):
        list_extrapolate_pixel.append(extrapolate_green_right_quad(img_raw,i,j))
    median_quad = np.zeros((2,2))
    for m in range(2):
        for n in range(2):
            # BUG FIX: take the median of each pixel's own candidates. The
            # previous code applied np.median to the whole (2, 2) candidate
            # arrays, filling every cell with one global median (compare the
            # red/blue counterpart median_pixel_quad, which indexes [k][m, n]).
            median_quad[m,n] = np.median([cand[m,n] for cand in list_extrapolate_pixel])
    return median_quad
def extrapolate_green_pixel_quad(img_raw,i,j,orientation):
    """(2, 2) green estimate for the quad cell at (i, j).

    `orientation` is the mean of the orientation map over the cell; > 0.5
    means mostly horizontal-gradient pixels, so one horizontal direction is
    pruned by RMSE, otherwise one vertical direction.
    """
    # First the borders:
    orientations_to_drop = []
    if (i<2):
        orientations_to_drop.append('top')
    if (i>img_raw.shape[0]-4*2):
        orientations_to_drop.append('bottom')
    if (j<2):
        orientations_to_drop.append('left')
    if (j>img_raw.shape[1]-4*2):
        orientations_to_drop.append('right')
    # Then the rest of the image:
    # NOTE(review): this `else` binds only to the right-border `if` above,
    # mirroring the Bayer version — confirm this is intentional.
    else:
        if (orientation >0.5): # V < H so we gonna eliminate one horizontal pixel.
            if ("right" not in orientations_to_drop and "left" not in orientations_to_drop):
                rmse_pixel_left = rmse_pixel(img_raw[i:i+2,j:j+2],extrapolate_green_left_quad(img_raw,i,j))
                rmse_pixel_right = rmse_pixel(img_raw[i:i+2,j:j+2],extrapolate_green_right_quad(img_raw,i,j))
                if (np.sum(rmse_pixel_left) > np.sum(rmse_pixel_right)):
                    orientations_to_drop.append('left')
                else:
                    orientations_to_drop.append('right')
        else: # V > H so we gonna eliminate one vertical pixel.
            if ("top" not in orientations_to_drop and "bottom" not in orientations_to_drop):
                # BUG FIX: compare the full 2x2 cell slice, as in the
                # horizontal branch; the original indexed a single pixel
                # with img_raw[i+2,j+2].
                rmse_pixel_top = rmse_pixel(img_raw[i:i+2,j:j+2],extrapolate_green_top_quad(img_raw,i,j))
                rmse_pixel_bottom = rmse_pixel(img_raw[i:i+2,j:j+2],extrapolate_green_bottom_quad(img_raw,i,j))
                if (np.sum(rmse_pixel_top) > np.sum(rmse_pixel_bottom)):
                    orientations_to_drop.append('top')
                else:
                    orientations_to_drop.append('bottom')
    return median_extrapolate_green_pixel_quad(img_raw,i,j,orientations_to_drop)
def extrapolate_green_quad(img_raw,extrapolate_img):
    """Fill channel 1 (green) cell by cell for a quad-Bayer mosaic.

    Green cells are copied through; the others are interpolated, with the
    cell orientation taken as the mean of the orientation map over the cell.
    Mutates and returns `extrapolate_img`.
    """
    orientation_matrix = compute_orientation_matrix(img_raw)
    for i in range(0,img_raw.shape[0],2):
        for j in range(0,img_raw.shape[1],2):
            if (color_pixel(i,j,'quad_bayer')!= "green"):
                extrapolate_img[i:i+2,j:j+2,1] = extrapolate_green_pixel_quad(img_raw,i,j,(1/4) *np.sum(orientation_matrix[i:i+2,j:j+2]))
            else:
                extrapolate_img[i:i+2,j:j+2,1] = img_raw[i:i+2,j:j+2]
    return extrapolate_img
## Red and Blue Channels ##
# Quad-Bayer versions of the directional red/blue estimators: same formulas
# as the Bayer ones with a stride of 2, evaluated for each pixel of the cell.
def extrapolate_top_quad(img_raw,img_extrapolate,i,j):
    """(2, 2) axial estimate from the cell above, green-difference corrected."""
    extrapolate_quad = np.zeros((2,2))
    for m in range(2):
        for n in range(2):
            extrapolate_quad[m,n] = (img_raw[i+m-1*2,j+n] + img_raw[i+m,j+n]-img_extrapolate[i+m-1*2,j+n,1])
    return extrapolate_quad
def extrapolate_left_quad(img_raw,img_extrapolate,i,j):
    """(2, 2) axial estimate from the cell to the left."""
    extrapolate_quad = np.zeros((2,2))
    for m in range(2):
        for n in range(2):
            extrapolate_quad[m,n] = (img_raw[i+m,j+n-1*2] + img_raw[i+m,j+n]-img_extrapolate[i+m,j+n-1*2,1])
    return extrapolate_quad
def extrapolate_right_quad(img_raw,img_extrapolate,i,j):
    """(2, 2) axial estimate from the cell to the right."""
    extrapolate_quad = np.zeros((2,2))
    for m in range(2):
        for n in range(2):
            extrapolate_quad[m,n] = (img_raw[i+m,j+n+1*2] + img_raw[i+m,j+n]-img_extrapolate[i+m,j+n+1*2,1])
    return extrapolate_quad
def extrapolate_bottom_quad(img_raw,img_extrapolate,i,j):
    """(2, 2) axial estimate from the cell below."""
    extrapolate_quad = np.zeros((2,2))
    for m in range(2):
        for n in range(2):
            extrapolate_quad[m,n] = (img_raw[i+m+1*2,j+n] + img_raw[i+m,j+n]-img_extrapolate[i+m+1*2,j+n,1])
    return extrapolate_quad
def extrapolate_top_left_quad(img_raw,img_extrapolate,i,j):
    """(2, 2) diagonal estimate from the top-left cell, green-difference corrected."""
    extrapolate_quad = np.zeros((2,2))
    for m in range(2):
        for n in range(2):
            extrapolate_quad[m,n] =(img_raw[i+m-1*2,j+n-1*2] + img_extrapolate[i+m,j+n,1]-img_extrapolate[i+m-1*2,j+n-1*2,1])
    return extrapolate_quad
def extrapolate_top_right_quad(img_raw,img_extrapolate,i,j):
    """(2, 2) diagonal estimate from the top-right cell."""
    extrapolate_quad = np.zeros((2,2))
    for m in range(2):
        for n in range(2):
            extrapolate_quad[m,n] =(img_raw[i+m-1*2,j+n+1*2] + img_extrapolate[i+m,j+n,1]-img_extrapolate[i+m-1*2,j+n+1*2,1])
    return extrapolate_quad
def extrapolate_bottom_left_quad(img_raw,img_extrapolate,i,j):
    """(2, 2) diagonal estimate from the bottom-left cell."""
    extrapolate_quad = np.zeros((2,2))
    for m in range(2):
        for n in range(2):
            extrapolate_quad[m,n] =(img_raw[i+m+1*2,j+n-1*2] + img_extrapolate[i+m,j+n,1]-img_extrapolate[i+m+1*2,j+n-1*2,1])
    return extrapolate_quad
def extrapolate_bottom_right_quad(img_raw,img_extrapolate,i,j):
    """(2, 2) diagonal estimate from the bottom-right cell."""
    extrapolate_quad = np.zeros((2,2))
    for m in range(2):
        for n in range(2):
            extrapolate_quad[m,n] =(img_raw[i+m+1*2,j+n+1*2] + img_extrapolate[i+m,j+n,1]-img_extrapolate[i+m+1*2,j+n+1*2,1])
    return extrapolate_quad
def median_pixel_quad(img_raw,img_extrapolate,i,j,orientations_to_drop):
    """Per-pixel median of the directional red/blue estimates for the 2x2
    cell at (i, j), excluding directions in `orientations_to_drop`.

    Diagonal estimators are used at non-green cells, axial ones at green
    cells. Returns a (2, 2) array.
    """
    list_extrapolate = []
    if (color_pixel(i,j,"quad_bayer") != "green"):
        if("top_left" not in orientations_to_drop):
            list_extrapolate.append(extrapolate_top_left_quad(img_raw,img_extrapolate,i,j))
        if("top_right" not in orientations_to_drop):
            list_extrapolate.append(extrapolate_top_right_quad(img_raw,img_extrapolate,i,j))
        if("bottom_left" not in orientations_to_drop):
            list_extrapolate.append(extrapolate_bottom_left_quad(img_raw,img_extrapolate,i,j))
        if("bottom_right" not in orientations_to_drop):
            list_extrapolate.append(extrapolate_bottom_right_quad(img_raw,img_extrapolate,i,j))
    elif (color_pixel(i,j,"quad_bayer") == "green"):
        if("top" not in orientations_to_drop):
            list_extrapolate.append(extrapolate_top_quad(img_raw,img_extrapolate,i,j))
        if("left" not in orientations_to_drop):
            list_extrapolate.append(extrapolate_left_quad(img_raw,img_extrapolate,i,j))
        if("right" not in orientations_to_drop):
            list_extrapolate.append(extrapolate_right_quad(img_raw,img_extrapolate,i,j))
        if("bottom" not in orientations_to_drop):
            list_extrapolate.append(extrapolate_bottom_quad(img_raw,img_extrapolate,i,j))
    median_quad = np.zeros((2,2))
    for m in range(2):
        for n in range(2):
            # Median over each pixel's own directional candidates.
            median_quad[m,n] = np.median([list_extrapolate[k][m,n] for k in range(len(list_extrapolate))])
    return median_quad
def extrapolate_pixel_quad(img_raw,img_extrapolate,i,j,color):
    """(2, 2) estimate of the missing `color` ('red' or 'blue') for the quad
    cell at (i, j).

    Mirrors extrapolate_pixel with a stride of 2: drops out-of-image
    directions, prunes the worst remaining direction by RMSE, then takes the
    per-pixel median of the directional estimates.
    """
    orientations_to_drop = []
    if (color_pixel(i,j,"quad_bayer")!='green'):
        if (i<1):
            orientations_to_drop.append("top_left")
            orientations_to_drop.append("top_right")
        if (i>img_raw.shape[0]-2*2):
            orientations_to_drop.append("bottom_left")
            orientations_to_drop.append("bottom_right")
        if (j<1):
            orientations_to_drop.append("top_left")
            orientations_to_drop.append("bottom_left")
        if (j>img_raw.shape[1]-2*2):
            orientations_to_drop.append("top_right")
            orientations_to_drop.append("bottom_right")
        if ("top_left" not in orientations_to_drop and "top_right" not in orientations_to_drop and "bottom_left" not in orientations_to_drop and "bottom_right" not in orientations_to_drop):
            rmse_top_left = rmse_pixel(img_raw[i:i+2,j:j+2],extrapolate_top_left_quad(img_raw,img_extrapolate,i,j))
            rmse_top_right = rmse_pixel(img_raw[i:i+2,j:j+2],extrapolate_top_right_quad(img_raw,img_extrapolate,i,j))
            rmse_bottom_left = rmse_pixel(img_raw[i:i+2,j:j+2],extrapolate_bottom_left_quad(img_raw,img_extrapolate,i,j))
            rmse_bottom_right = rmse_pixel(img_raw[i:i+2,j:j+2],extrapolate_bottom_right_quad(img_raw,img_extrapolate,i,j))
            # Drop the diagonal with the largest error.
            if (rmse_bottom_left> rmse_bottom_right and rmse_bottom_left> rmse_top_left and rmse_bottom_left> rmse_top_right):
                orientations_to_drop.append("bottom_left")
            elif (rmse_bottom_right> rmse_bottom_left and rmse_bottom_right> rmse_top_left and rmse_bottom_right> rmse_top_right):
                orientations_to_drop.append("bottom_right")
            elif (rmse_top_left> rmse_bottom_left and rmse_top_left> rmse_bottom_right and rmse_top_left> rmse_top_right):
                orientations_to_drop.append("top_left")
            else:
                orientations_to_drop.append("top_right")
    elif(color_pixel(i,j,"quad_bayer")=="green"):
        if (i<1):
            orientations_to_drop.append("top")
        if (i>img_raw.shape[0]-2*2):
            orientations_to_drop.append("bottom")
        if (j<1):
            orientations_to_drop.append("left")
        if (j>img_raw.shape[1]-2*2):
            orientations_to_drop.append("right")
        # NOTE(review): (i/2) is float division — for even i this behaves like
        # (i//2) % 2, but confirm i is always cell-aligned (even) here.
        if (((i/2)%2!=0 and color == "red") or ((i/2)%2==0 and color == "blue")):
            if ("right" not in orientations_to_drop and "left" not in orientations_to_drop):
                rmse_pixel_left = rmse_pixel(img_raw[i:i+2,j:j+2],extrapolate_left_quad(img_raw,img_extrapolate,i,j))
                rmse_pixel_right = rmse_pixel(img_raw[i:i+2,j:j+2],extrapolate_right_quad(img_raw,img_extrapolate,i,j))
                if (rmse_pixel_left > rmse_pixel_right):
                    orientations_to_drop.append('left')
                else:
                    orientations_to_drop.append('right')
        else:
            if ("top" not in orientations_to_drop and "bottom" not in orientations_to_drop):
                rmse_pixel_top = rmse_pixel(img_raw[i:i+2,j:j+2],extrapolate_top_quad(img_raw,img_extrapolate,i,j))
                rmse_pixel_bottom = rmse_pixel(img_raw[i:i+2,j:j+2],extrapolate_bottom_quad(img_raw,img_extrapolate,i,j))
                if (rmse_pixel_top > rmse_pixel_bottom):
                    orientations_to_drop.append('top')
                else:
                    orientations_to_drop.append('bottom')
    return median_pixel_quad(img_raw,img_extrapolate,i,j,orientations_to_drop)
def extrapolate_red_quad(img_raw,img_extrapolate):
    """Fill channel 0 (red) cell by cell for a quad-Bayer mosaic, in place.

    Relies on the green plane (channel 1) being already reconstructed.
    """
    for i in range(0,img_raw.shape[0],2):
        for j in range(0,img_raw.shape[1],2):
            if (color_pixel(i,j,"quad_bayer")!="red"):
                img_extrapolate[i:i+2,j:j+2,0] = extrapolate_pixel_quad(img_raw,img_extrapolate,i,j,"red")
            else:
                img_extrapolate[i:i+2,j:j+2,0] = img_raw[i:i+2,j:j+2]
def extrapolate_blue_quad(img_raw,img_extrapolate):
    """Fill channel 2 (blue) cell by cell for a quad-Bayer mosaic, in place.

    Relies on the green plane (channel 1) being already reconstructed.
    """
    for i in range(0,img_raw.shape[0],2):
        for j in range(0,img_raw.shape[1],2):
            if (color_pixel(i,j,"quad_bayer")!="blue"):
                img_extrapolate[i:i+2,j:j+2,2] = extrapolate_pixel_quad(img_raw,img_extrapolate,i,j,"blue")
            else:
                img_extrapolate[i:i+2,j:j+2,2] = img_raw[i:i+2,j:j+2]
def extrapolate_img_quad(img_cfa):
    """Demosaick a quad-Bayer CFA image.

    Returns an (H, W, 3) RGB array; green is reconstructed first because the
    red/blue interpolation reads from it.
    """
    rgb = np.zeros(img_cfa.shape + (3,))
    extrapolate_green_quad(img_cfa, rgb)
    extrapolate_red_quad(img_cfa, rgb)
    extrapolate_blue_quad(img_cfa, rgb)
    return rgb
def extrapolate_cfa(img_cfa, cfa):
    """Demosaick `img_cfa` according to the CFA layout name.

    Args:
        img_cfa: 2-D mosaicked image.
        cfa: 'bayer' or 'quad_bayer'.

    Returns:
        (H, W, 3) demosaicked RGB image.

    Raises:
        ValueError: if `cfa` is not a supported layout name.
    """
    if cfa == "bayer":
        return extrapolate_img(img_cfa)
    if cfa == "quad_bayer":
        return extrapolate_img_quad(img_cfa)
    # Fail loudly: the previous print-and-return-None would only surface
    # later as an opaque attribute/shape error in the caller.
    raise ValueError(f"Unknown CFA '{cfa}': expected 'bayer' or 'quad_bayer'")
\ No newline at end of file
source diff could not be displayed: it is too large. Options to address this: view the blob.
"""The main file for the reconstruction.
This file should NOT be modified except the body of the 'run_reconstruction' function.
Students can call their functions (declared in others files of src/methods/your_name).
"""
import numpy as np
import functions as fu
from src.forward_model import CFA
import importlib
importlib.reload(fu)
def run_reconstruction(y: np.ndarray, cfa: str) -> np.ndarray:
    """Performs demosaicking on y.
    Args:
        y (np.ndarray): Mosaicked image to be reconstructed.
        cfa (str): Name of the CFA. Can be bayer or quad_bayer.
    Returns:
        np.ndarray: Demosaicked image.
    """
    # Performing the reconstruction.
    # Delegates to the edge-directed median extrapolation implemented in
    # `functions` (fu), which dispatches on the CFA name.
    return fu.extrapolate_cfa(y,cfa)
File added
"""The main file for the reconstruction.
This file should NOT be modified except the body of the 'run_reconstruction' function.
Students can call their functions (declared in others files of src/methods/your_name).
"""
import numpy as np
from src.forward_model import CFA
from src.methods.Chardon_tom.utils import *
import pywt
# NOTE: it is normal that the reconstruction lasts several minutes (~3 min on my machine).
def run_reconstruction(y: np.ndarray, cfa: str) -> np.ndarray:
    """Performs demosaicking on y.
    Args:
        y (np.ndarray): Mosaicked image to be reconstructed.
        cfa (str): Name of the CFA. Can be bayer or quad_bayer.
    Returns:
        np.ndarray: Demosaicked image.
    """
    # Build the forward operator for the CFA actually requested: the previous
    # code hard-coded 'bayer' and silently ignored the `cfa` argument.
    # NOTE(review): the interpolation loops below assume Bayer sample parity;
    # quad_bayer input would need adapted loop strides — confirm before use.
    input_shape = (y.shape[0], y.shape[1], 3)
    op = CFA(cfa, input_shape)
    res = op.adjoint(y)
    N, M = input_shape[0], input_shape[1]

    # Interpolate the green channel wherever it is missing (zero after adjoint).
    for i in range(N):
        for j in range(M):
            if res[i, j, 1] == 0:
                neighbors = get_neighbors(res, 1, i, j, N, M)
                weights = get_weights(res, i, j, 1, N, M)
                res[i, j, 1] = interpolate_green(weights, neighbors)

    # First red pass on the opposite-parity grid, guided by the green plane.
    for i in range(1, N, 2):
        for j in range(0, M, 2):
            neighbors = get_neighbors(res, 0, i, j, N, M)
            neighbors_G = get_neighbors(res, 1, i, j, N, M)
            weights = get_weights(res, i, j, 0, N, M)
            res[i, j, 0] = interpolate_red_blue(weights, neighbors, neighbors_G)
    # Second red pass: fill the remaining holes by weighted averaging.
    for i in range(N):
        for j in range(M):
            if res[i, j, 0] == 0:
                neighbors = get_neighbors(res, 0, i, j, N, M)
                weights = get_weights(res, i, j, 0, N, M)
                res[i, j, 0] = interpolate_green(weights, neighbors)

    # Blue channel: same two-pass scheme as red.
    for i in range(0, N, 2):
        for j in range(1, M, 2):
            neighbors = get_neighbors(res, 2, i, j, N, M)
            neighbors_G = get_neighbors(res, 1, i, j, N, M)
            weights = get_weights(res, i, j, 2, N, M)
            res[i, j, 2] = interpolate_red_blue(weights, neighbors, neighbors_G)
    for i in range(N):
        for j in range(M):
            if res[i, j, 2] == 0:
                neighbors = get_neighbors(res, 2, i, j, N, M)
                weights = get_weights(res, i, j, 2, N, M)
                res[i, j, 2] = interpolate_green(weights, neighbors)

    # Clamp the reconstruction to the valid [0, 1] range.
    np.clip(res, 0, 1, out=res)
    return res
import numpy as np
import pywt
def get_neighbors(img, channel, i, j, N, M):
    """Return the 3x3 neighborhood of (i, j) on `channel` as a flat array
    [P1..P9] in row-major order, wrapping around the borders (modulo N, M)."""
    offsets = [(di, dj) for di in (-1, 0, 1) for dj in (-1, 0, 1)]
    return np.array([img[(i + di) % N, (j + dj) % M, channel]
                     for di, dj in offsets])
def get_derivatives(neighbors):
    """Central-difference derivatives of a flat 3x3 neighborhood [P1..P9].

    Returns [Dx, Dy, Dxd, Dyd]: horizontal, vertical, and the two diagonals
    (diagonal steps are sqrt(2) long, hence the extra scaling).
    """
    P1, P2, P3, P4, _, P6, P7, P8, P9 = neighbors
    diag_step = 2 * np.sqrt(2)
    return [(P4 - P6) / 2, (P2 - P8) / 2,
            (P3 - P7) / diag_step, (P1 - P9) / diag_step]
def get_weights(mosaic_image, i, j, channel, N, M):
    """Edge-adaptive weights [E1..E4, E6..E9] for the 8 neighbors of (i, j)
    (the center E5 is omitted): large where the local gradients are small.

    Each weight pairs a central derivative with the matching derivative of
    the corresponding neighbor along the same direction.
    """
    derivs = []
    for di in range(-1, 2):
        for dj in range(-1, 2):
            derivs.append(get_derivatives(
                get_neighbors(mosaic_image, channel, i + di, j + dj, N, M)))
    Dx, Dy, Dxd, Dyd = derivs[4]
    # (central derivative, neighbor derivative) per neighbor; the index 0..3
    # selects Dx, Dy, Dxd or Dyd of that neighbor.
    pairings = [
        (Dyd, derivs[0][3]), (Dy, derivs[1][1]),
        (Dxd, derivs[2][2]), (Dx, derivs[3][0]),
        (Dxd, derivs[5][2]), (Dy, derivs[6][1]),
        (Dyd, derivs[7][3]), (Dx, derivs[8][0]),
    ]
    return [1 / np.sqrt(1 + center ** 2 + neighbor ** 2)
            for center, neighbor in pairings]
def interpolate_green(weights, neighbors):
    """Weighted average of the four axial neighbors (P2, P4, P6, P8)."""
    _, E2, _, E4, E6, _, E8, _ = weights
    _, P2, _, P4, _, P6, _, P8, _ = neighbors
    return (E2 * P2 + E4 * P4 + E6 * P6 + E8 * P8) / (E2 + E4 + E6 + E8)
def interpolate_red_blue(weights, neighbors, green_neighbors):
    """Weighted average of the four diagonal neighbors, modulated by the
    green plane (constant chroma-ratio assumption)."""
    E1, _, E3, _, _, _, _, E9 = weights
    E7 = weights[5]
    P1, _, P3, _, _, _, P7, _, P9 = neighbors
    G1, _, G3, _, G5, _, G7, _, G9 = green_neighbors
    ratio_sum = E1 * P1 / G1 + E3 * P3 / G3 + E7 * P7 / G7 + E9 * P9 / G9
    return G5 * ratio_sum / (E1 + E3 + E7 + E9)
def correction_green(res,i,j,N,M):
    """Re-estimate green at (i, j) as the mean of two chroma-ratio estimates.

    NOTE(review): Gb5 divides green by the BLUE neighbors but scales by R5,
    and Gr5 does the converse — the R/B pairing looks swapped relative to
    the variable names; confirm against the intended formula.
    """
    [G1,G2,G3,G4,G5,G6,G7,G8,G9] = get_neighbors(res,1,i,j,N,M)
    [R1,R2,R3,R4,R5,R6,R7,R8,R9] = get_neighbors(res,0,i,j,N,M)
    [B1,B2,B3,B4,B5,B6,B7,B8,B9] = get_neighbors(res,2,i,j,N,M)
    [E1,E2,E3,E4,E6,E7,E8,E9] = get_weights(res,i,j,1,N,M)
    Gb5 = R5*((E2*G2)/B2 + (E4*G4)/B4 + (E6*G6)/B6 + (E8*G8)/B8)/(E2 + E4 + E6 + E8)
    Gr5 = B5*((E2*G2)/R2 + (E4*G4)/R4 + (E6*G6)/R6 + (E8*G8)/R8)/(E2 + E4 + E6 + E8)
    G5 = (Gb5 + Gr5)/2
    return G5
def correction_red(res,i,j,N,M) :
    """Re-estimate red at (i, j): weighted mean of the 8 neighbor R/G ratios,
    scaled by the local green value."""
    [G1,G2,G3,G4,G5,G6,G7,G8,G9] = get_neighbors(res,1,i,j,N,M)
    [R1,R2,R3,R4,R5,R6,R7,R8,R9] = get_neighbors(res,0,i,j,N,M)
    [E1,E2,E3,E4,E6,E7,E8,E9] = get_weights(res,i,j,0,N,M)
    R5 = G5*((E1*R1)/G1 + (E2*R2)/G2 + (E3*R3)/G3 + (E4*R4)/G4 + (E6*R6)/G6 + (E7*R7)/G7 + (E8*R8)/G8 + (E9*R9)/G9)/(E1 + E2 + E3 + E4 + E6 + E7 + E8 + E9)
    return R5
def correction_blue(res,i,j,N,M) :
    """Re-estimate blue at (i, j): weighted mean of the 8 neighbor B/G ratios,
    scaled by the local green value."""
    [G1,G2,G3,G4,G5,G6,G7,G8,G9] = get_neighbors(res,1,i,j,N,M)
    [B1,B2,B3,B4,B5,B6,B7,B8,B9] = get_neighbors(res,2,i,j,N,M)
    [E1,E2,E3,E4,E6,E7,E8,E9] = get_weights(res,i,j,2,N,M)
    B5 = G5*((E1*B1)/G1 + (E2*B2)/G2 + (E3*B3)/G3 + (E4*B4)/G4 + (E6*B6)/G6 + (E7*B7)/G7 + (E8*B8)/G8 + (E9*B9)/G9)/(E1 + E2 + E3 + E4 + E6 + E7 + E8 + E9)
    return B5
File added
import numpy as np
def find_Knearest_neighbors(z, chan, i, j, N, M):
    """Return the flat 3x3 neighborhood of (i, j) on channel `chan`,
    row-major, wrapping around the image borders (modulo N, M)."""
    # The original had a second string literal after the docstring — a dead
    # statement, not documentation — removed here.
    return np.array([z[(i+di)%N, (j+dj)%M, chan] for di in range(-1, 2) for dj in range(-1, 2)])
def calculate_directional_gradients(neighbors):
    """Return [Dx, Dy, Dxd, Dyd]: central-difference derivatives of a flat
    3x3 neighborhood along the horizontal, vertical and diagonal directions
    (diagonal steps are sqrt(2) long, hence the extra scaling)."""
    # The original had a second string literal after the docstring — a dead
    # statement, not documentation — removed here.
    P1, P2, P3, P4, P5, P6, P7, P8, P9 = neighbors
    Dx, Dy = (P4 - P6)/2, (P2 - P8)/2
    Dxd, Dyd = (P3 - P7)/(2*np.sqrt(2)), (P1 - P9)/(2*np.sqrt(2))
    return [Dx, Dy, Dxd, Dyd]
def calculate_adaptive_weights(z, neigh, dir_deriv,chan,i,j,N,M):
"""Finds all the neighbors of a pixel on a given channel"""
[Dx,Dy,Dxd,Dyd] = dir_deriv
[P1,P2,P3,P4,P5,P6,P7,P8,P9] = neigh
E = []
......@@ -34,8 +34,7 @@ def calculate_adaptive_weights(z, neigh, dir_deriv,chan,i,j,N,M):
return E
def interpolate_pixel(neigh,weights):
"""interpolates pixels from a grid where one of two pixels is missing regularly spaced"""
"""This function performs interpolation for a single pixel by calculating a weighted average of its neighboring pixels"""
[P1,P2,P3,P4,P5,P6,P7,P8,P9] = neigh
[E1,E2,E3,E4,E6,E7,E8,E9] = weights
num5 = E2*P2 + E4*P4 + E6*P6 + E8*P8
......@@ -44,7 +43,7 @@ def interpolate_pixel(neigh,weights):
return I5
def interpolate_RedBlue(neighbors, neighbors_G, weights):
"""Interpolates the central missing pixel from the red or blue channel from a Bayer pattern."""
"""This function specifically interpolates a pixel in the red or blue channels"""
[P1,P2,P3,P4,P5,P6,P7,P8,P9] = neighbors
[G1,G2,G3,G4,G5,G6,G7,G8,G9] = neighbors_G
[E1,E2,E3,E4,E6,E7,E8,E9] = weights
......
source diff could not be displayed: it is too large. Options to address this: view the blob.
......@@ -4,17 +4,7 @@ from src.methods.ELAMRANI_Mouna.functions import *
def run_reconstruction(y: np.ndarray, cfa: str) -> np.ndarray:
"""Performs demosaicking on y.
Args:
y (np.ndarray): Mosaicked image to be reconstructed.
cfa (str): Name of the CFA. Can be bayer or quad_bayer.
Returns:
np.ndarray: Demosaicked image.
"""
# Define constants and operators
cfa_name = 'bayer' # bayer or quad_bayer
input_shape = (y.shape[0], y.shape[1], 3)
op = CFA(cfa_name, input_shape)
......
File added
import numpy as np
from scipy.signal import correlate2d
from src.forward_model import CFA
def malvar_he_cutler(y: np.ndarray, op: CFA ) -> np.ndarray:
    """Performs demosaicing using the malvar-he-cutler algorithm.

    Args:
        y (np.ndarray): Mosaicked image.
        op (CFA): CFA operator; its mask selects the R/G/B sample positions
            and its `cfa` attribute selects the 5x5 or quad (10x10) kernels.

    Returns:
        np.ndarray: Demosaicked image of shape op.input_shape.
    """
    red_mask, green_mask, blue_mask = [op.mask[:, :, 0], op.mask[:, :, 1], op.mask[:, :, 2]]
    mosaicked_image = np.float32(y)
    demosaicked_image = np.empty(op.input_shape)
    # Quad-Bayer uses 2x-upsampled kernels; plain Bayer the classic 5x5 ones.
    if op.cfa == 'quad_bayer':
        filters = get_quad_bayer_filters()
    else:
        filters = get_default_filters()
    demosaicked_image = apply_demosaicking_filters(
        mosaicked_image,demosaicked_image, red_mask, green_mask, blue_mask, filters
    )
    return demosaicked_image
def get_quad_bayer_filters():
    """Quad-Bayer variants of the Malvar-He-Cutler kernels.

    Each 5x5 Bayer kernel is upsampled by 2 (every tap duplicated over a
    2x2 block via a Kronecker product) and divided by 4 so the kernel gains
    are unchanged (0.125 / 4 == 0.03125).
    """
    return {name: np.kron(kernel, np.ones((2, 2))) / 4
            for name, kernel in get_default_filters().items()}
def get_default_filters():
    """Malvar-He-Cutler 5x5 interpolation kernels for a Bayer CFA.

    Keys name the (channel, site) combination each kernel estimates.
    Every kernel sums to 1 after the 1/8 normalization.
    """
    scale = 0.125
    g_at_rb = [
        [0, 0, -1, 0, 0],
        [0, 0, 2, 0, 0],
        [-1, 2, 4, 2, -1],
        [0, 0, 2, 0, 0],
        [0, 0, -1, 0, 0],
    ]
    r_at_gr = [
        [0, 0, 0.5, 0, 0],
        [0, -1, 0, -1, 0],
        [-1, 4, 5, 4, -1],
        [0, -1, 0, -1, 0],
        [0, 0, 0.5, 0, 0],
    ]
    r_at_gb = [
        [0, 0, -1, 0, 0],
        [0, -1, 4, -1, 0],
        [0.5, 0, 5, 0, 0.5],
        [0, -1, 4, -1, 0],
        [0, 0, -1, 0, 0],
    ]
    r_at_b = [
        [0, 0, -1.5, 0, 0],
        [0, 2, 0, 2, 0],
        [-1.5, 0, 6, 0, -1.5],
        [0, 2, 0, 2, 0],
        [0, 0, -1.5, 0, 0],
    ]
    raw_kernels = {
        "G_at_R_and_B": g_at_rb,
        "R_at_GR_and_B_at_GB": r_at_gr,
        "R_at_GB_and_B_at_GR": r_at_gb,
        "R_at_B_and_B_at_R": r_at_b,
    }
    return {name: np.array(kernel) * scale
            for name, kernel in raw_kernels.items()}
def apply_demosaicking_filters(image, res, red_mask, green_mask, blue_mask, filters):
    """Demosaick a single-plane CFA image by linear filtering.

    Args:
        image: 2-D mosaicked image (one plane holding the CFA samples).
        res: pre-allocated (H, W, 3) output array; written in place.
        red_mask, green_mask, blue_mask: per-pixel indicator masks, 1 where the
            CFA sampled that colour and 0 elsewhere (assumed rectangular Bayer
            layout — rows/columns of the red and blue sites must alternate).
        filters: dict of 2-D kernels keyed by 'G_at_R_and_B',
            'R_at_GR_and_B_at_GB', 'R_at_GB_and_B_at_GR', 'R_at_B_and_B_at_R'.

    Returns:
        res, with channels 0/1/2 holding the red/green/blue estimates.
    """
    # Start each plane from the raw CFA samples of its own colour.
    r_plane = image * red_mask
    g_plane = image * green_mask
    b_plane = image * blue_mask

    def _filtered(key):
        # Cross-correlate the mosaic with one kernel, symmetric padding at borders.
        return correlate2d(image, filters[key], mode="same", boundary="symm")

    # Interpolate green wherever a red or blue sample was taken.
    g_plane = np.where(
        np.logical_or(red_mask == 1, blue_mask == 1),
        _filtered("G_at_R_and_B"),
        g_plane,
    )

    # Row/column indicators: which rows (resp. columns) contain red/blue samples.
    rows_with_red = np.any(red_mask == 1, axis=1)[:, np.newaxis].astype(np.float32)
    cols_with_red = np.any(red_mask == 1, axis=0)[np.newaxis].astype(np.float32)
    rows_with_blue = np.any(blue_mask == 1, axis=1)[:, np.newaxis].astype(np.float32)
    cols_with_blue = np.any(blue_mask == 1, axis=0)[np.newaxis].astype(np.float32)

    def _fill(plane, rows, cols, key):
        # Replace `plane` by the filtered estimate where both indicators are set.
        return np.where(np.logical_and(rows == 1, cols == 1), _filtered(key), plane)

    # Red and blue at the two kinds of green sites (the two orientations swap
    # roles between the red and blue planes).
    r_plane = _fill(r_plane, rows_with_red, cols_with_blue, "R_at_GR_and_B_at_GB")
    r_plane = _fill(r_plane, rows_with_blue, cols_with_red, "R_at_GB_and_B_at_GR")
    b_plane = _fill(b_plane, rows_with_blue, cols_with_red, "R_at_GR_and_B_at_GB")
    b_plane = _fill(b_plane, rows_with_red, cols_with_blue, "R_at_GB_and_B_at_GR")

    # Red at blue sites and blue at red sites.
    r_plane = _fill(r_plane, rows_with_blue, cols_with_blue, "R_at_B_and_B_at_R")
    b_plane = _fill(b_plane, rows_with_red, cols_with_red, "R_at_B_and_B_at_R")

    res[:, :, 0] = r_plane
    res[:, :, 1] = g_plane
    res[:, :, 2] = b_plane
    return res
\ No newline at end of file
"""The main file for the reconstruction.
This file should NOT be modified except the body of the 'run_reconstruction' function.
Students can call their functions (declared in others files of src/methods/your_name).
"""
import numpy as np
from src.forward_model import CFA
from src.methods.EL_MURR_Theresa.malvar import malvar_he_cutler
def run_reconstruction(y: np.ndarray, cfa: str) -> np.ndarray:
    """Performs demosaicking on y.

    Args:
        y (np.ndarray): Mosaicked image to be reconstructed.
        cfa (str): Name of the CFA. Can be bayer or quad_bayer.

    Returns:
        np.ndarray: Demosaicked image.
    """
    # Forward operator for a 3-channel output with the same spatial size as y.
    operator = CFA(cfa, (y.shape[0], y.shape[1], 3))
    # Delegate the actual work to the Malvar-He-Cutler reconstruction.
    return malvar_he_cutler(y, operator)
####
####
####
#### #### #### #############
#### ###### #### ##################
#### ######## #### ####################
#### ########## #### #### ########
#### ############ #### #### ####
#### #### ######## #### #### ####
#### #### ######## #### #### ####
#### #### ######## #### #### ####
#### #### ## ###### #### #### ######
#### #### #### ## #### #### ############
#### #### ###### #### #### ##########
#### #### ########## #### #### ########
#### #### ######## #### ####
#### #### ############ ####
#### #### ########## ####
#### #### ######## ####
#### #### ###### ####
# 2023
# Authors: Mauro Dalla Mura and Matthieu Muller
File added