From acfa56fdef5064fb482c2de668c8dd3e50983739 Mon Sep 17 00:00:00 2001
From: Chloe Mimeau <Chloe.Mimeau@imag.fr>
Date: Thu, 25 Jul 2013 10:26:53 +0000
Subject: [PATCH] big scales update

---
 .../{cart_mesh.f90 => cart_mesh_tools.f90}    |   61 +-
 .../scalesInterface/layout/cart_topology.f90  |  343 ++--
 HySoP/src/scalesInterface/output/vtkxml.f90   |   11 +-
 .../src/scalesInterface/output/vtkxml_bin.f90 |   10 +-
 HySoP/src/scalesInterface/particles/advec.f90 |  322 ++--
 .../src/scalesInterface/particles/advecX.f90  |  157 +-
 .../src/scalesInterface/particles/advecY.f90  |   78 +-
 .../src/scalesInterface/particles/advecZ.f90  |   83 +-
 .../particles/advec_Vector.f90                |  303 +---
 .../particles/advec_common_group.F90          |  185 ++
 .../particles/advec_common_interpol.F90       | 1302 ++++++++++++++
 .../particles/advec_common_remesh.F90         | 1551 +++++++++++++++++
 .../particles/advec_correction.f90            |   39 +-
 .../particles/advec_remesh_Mprime.f90         |  295 +++-
 .../particles/advec_remesh_lambda.f90         |  144 +-
 .../scalesInterface/particles/advec_type.f90  |   41 +-
 .../particles/advec_variables.f90             |   87 +-
 .../particles/interpolation_velo.F90          |  896 ++++++++++
 HySoP/src/scalesInterface/precision_tools.f90 |   11 +-
 19 files changed, 5064 insertions(+), 855 deletions(-)
 rename HySoP/src/scalesInterface/layout/{cart_mesh.f90 => cart_mesh_tools.f90} (70%)
 create mode 100644 HySoP/src/scalesInterface/particles/advec_common_group.F90
 create mode 100644 HySoP/src/scalesInterface/particles/advec_common_interpol.F90
 create mode 100644 HySoP/src/scalesInterface/particles/advec_common_remesh.F90
 create mode 100644 HySoP/src/scalesInterface/particles/interpolation_velo.F90

diff --git a/HySoP/src/scalesInterface/layout/cart_mesh.f90 b/HySoP/src/scalesInterface/layout/cart_mesh_tools.f90
similarity index 70%
rename from HySoP/src/scalesInterface/layout/cart_mesh.f90
rename to HySoP/src/scalesInterface/layout/cart_mesh_tools.f90
index 3e3ebc4e9..a60573898 100644
--- a/HySoP/src/scalesInterface/layout/cart_mesh.f90
+++ b/HySoP/src/scalesInterface/layout/cart_mesh_tools.f90
@@ -1,17 +1,24 @@
-!> @addtogroup cart_structure
+!USEFORTEST toolbox
+!USEFORTEST postprocess
+!USEFORTEST advec
+!USEFORTEST interpolation
+!USEFORTEST io
+!USEFORTEST topo
+!USEFORTEST avgcond
+!> @addtogroup toolbox
 !! @{
 
 !-----------------------------------------------------------------------------
 !
-! MODULE: cart_mesh
+! MODULE: cart_mesh_tools
 !
 !
-! DESCRIPTION: 
+! DESCRIPTION:
 !>  This module provide a mesh structure. It is used for output and as future
 !! base to deal with different scalar field computed with eventually different
 !! resolutions.
 !
-!> @details 
+!> @details
 !!  This module provide structure to save mesh context associated to a field.
 !! This allow to easily work with different resolutions and to know how
 !! mesh interact with the mpi topology.
@@ -23,14 +30,14 @@
 !
 !------------------------------------------------------------------------------
 
-module cart_mesh
+module cart_mesh_tools
 
     use precision_tools
 
     implicit none
 
     public
-    
+
     ! ===== Type =====
     ! > Information about mesh subdivision and on the global grid
     type cartesian_mesh
@@ -43,49 +50,19 @@ module cart_mesh
         !> information about min and max global indice associated to the current processus
         integer, dimension(3,2) :: absolute_extend
         !> space step for field discretisation
-        real(WP), dimension(3)  :: d_space
+        real(WP), dimension(3)  :: dx
+        !> Physical size
+        real(WP), dimension(3)  :: length
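+        ! (typically dx = length/N; cf. discretisation_init in cart_topology)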
     end type cartesian_mesh
-        
+
 
     ! ===== Public procedures =====
     ! Auto-complete cartesian_mesh data field.
     public      :: mesh_save
-    ! Create a cartesian_mesh variable related to data save in cart_topolgoy module.
-    public      :: mesh_save_default
 
 
 contains
 
-!> Save data about the cartesian mesh create in cart_topology module
-!>    @param[out]   mesh    = varialbe of type cartesian_mesh where the data about mesh are save
-subroutine mesh_save_default(mesh)
-
-    use cart_topology   ! Description of mesh and of mpi topology
-    implicit none
-
-    ! Input/Output
-    type(cartesian_mesh), intent(out)       :: mesh
-    ! Other local variables
-    integer                                 :: direction    ! integer matching to a direction (X, Y or Z)
-
-    ! Number of mesh
-    mesh%N = N
-    mesh%N_proc = N_proc
-
-    ! Relative extend
-    mesh%relative_extend(:,1) = begin_proc
-    mesh%relative_extend(:,2) = end_proc
-    ! Absolute one
-    do direction = 1, 3
-        mesh%absolute_extend(direction,:) = coord(direction)*N_proc(direction) + mesh%relative_extend(direction,:)
-    end do
-
-    ! Space step
-    mesh%d_space = d_sc
-
-end subroutine mesh_save_default
-
-
 !> Auto-complete some field about "cartesian_mesh" variables.
 !>    @param[out]   mesh    = variable of type cartesian_mesh where the data about mesh are save
 !>    @param[in]    Nb      = number of grid point along each direction
@@ -118,10 +95,10 @@ subroutine mesh_save(mesh, Nb, Nb_proc, d_space, coord)
     end do
 
     ! Space step
-    mesh%d_space = d_space
+    mesh%dx = d_space
 
 end subroutine mesh_save
 
 
-end module cart_mesh
+end module cart_mesh_tools
 !> @}
diff --git a/HySoP/src/scalesInterface/layout/cart_topology.f90 b/HySoP/src/scalesInterface/layout/cart_topology.f90
index 4b57ba1f0..63554dee5 100644
--- a/HySoP/src/scalesInterface/layout/cart_topology.f90
+++ b/HySoP/src/scalesInterface/layout/cart_topology.f90
@@ -1,3 +1,10 @@
+!USEFORTEST toolbox
+!USEFORTEST avgcond
+!USEFORTEST postprocess
+!USEFORTEST advec
+!USEFORTEST interpolation
+!USEFORTEST io
+!USEFORTEST topo
 !> @addtogroup cart_structure
 !! @{
 
@@ -7,10 +14,10 @@
 ! MODULE: cart_topology
 !
 !
-! DESCRIPTION: 
+! DESCRIPTION:
 !>  This module provide a cartesien topology on the parrallel layout.
 !
-!> @details 
+!> @details
 !!  This module provide a cartesien topology on the parrallel layout.
 !! This virtual topology is created by the MPI procedures (and thus use
 !! low-level optimisation based on the underlyinfg hardware). It
@@ -36,34 +43,42 @@
 module cart_topology
 
     use precision_tools
+    use cart_mesh_tools
 
     implicit none
 
+    ! ===== Structure =====
+    ! ----- Structure to save work item information -----
+    ! This allows using different resolutions more easily.
+    type group_info
+      !> Computations are done by groups of lines. Here we define their size
+      integer, dimension(3,2)             :: group_size
+      !> To check if group size is initialized
+      logical                             :: group_init = .false.
+      !> To concatenate positions in order to create a unique mpi message tag
+      integer, dimension(3,2)             :: tag_size
+      !> To concatenate ranks in order to create a unique mpi message tag
+      integer                             :: tag_rank
+      !> To check if parameter is already initialized
+      logical                             :: mesh_init = .false.
+    end type group_info
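+    ! Note: these fields mirror the module-level group_size / tag_size /
+    ! tag_rank variables below, so that several resolutions can each carry
+    ! their own work-item description.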
+
 
     ! ===== Public variables =====
 
     ! ----- Communicators -----
     !> Communicator associated with the cartesian topology
-    integer, protected                  :: cart_comm        
+    integer, protected                  :: cart_comm
     !> Communicators devoted to 1-dimensionnal subgrids (along Y and Z)
-    integer, protected                  :: X_comm, Y_comm, Z_comm   
+    integer, protected                  :: X_comm, Y_comm, Z_comm
     !> Table of the previous communicators (ie comm devoted to 1D subgrids)
     integer, dimension(3), protected    :: D_comm
     !> Rank of immediate neighbors
-    integer, dimension(3,2), protected  :: neighbors
+    integer,dimension(3,-2:2),protected :: neighbors
 
-    ! ----- Information about mesh subdivision and on the global grid -----
+    ! ----- Information about the current MPI process and the MPI topology -----
     !> number of processes in each direction
     integer, dimension(3), protected    :: nb_proc_dim
-    !> information about min and max local indice on the current directory
-    integer, dimension(3), protected    :: begin_proc, end_proc     
-    !> space lengh of th domain
-    real(WP), dimension(3), protected   :: length
-    !> space step for scalar discretisation
-    real(WP), dimension(3), protected   :: d_sc                     
-    !> number of (sub)grid in each direction
-    integer, dimension(3), protected    :: N
-    integer, dimension(3), protected    :: N_proc
     !> rank of current processus (in the cartesian communicator)
     integer, public                     :: cart_rank
     !> rank of current processus (in the in communicator associated to the different direction)
@@ -73,7 +88,13 @@ module cart_topology
     !> YZ coordinate of the current processus
     integer, dimension(2), protected    :: coordYZ
     !> Periodic boundary conditions: logical array, equals true if periodic
-    logical, dimension(3),protected     :: periods                      
+    logical, dimension(3),protected     :: periods
+
+    ! ----- Information about mesh subdivision and about the global grid -----
+    !> information about local mesh - for scalar
+    type(cartesian_mesh), protected     :: mesh_sc
+    !> Copy of mesh_sc%N_proc for the python interface
+    integer, dimension(3)               :: N_proc
     !> Computation are done by group of line. Here we define their size
     integer, dimension(3,2), protected  :: group_size
     !> To check if group size is initialized
@@ -86,9 +107,13 @@ module cart_topology
     logical, private                    :: mesh_init = .false.
     !> Default mesh resolution
     integer, parameter                  :: default_size = 80
+    !> information about local mesh - for velocity
+    type(cartesian_mesh), protected     :: mesh_V
+    !> To check if mesh is already initialized
+    logical, private                    :: mesh_velo_init = .false.
 
 
-    ! ==== Public procedures ==== 
+    ! ==== Public procedures ====
     ! Creation of the cartesian topology
     public      :: cart_create
     ! Initialise mesh information (first part)
@@ -104,8 +129,10 @@ module cart_topology
     private     :: set_group_size_1x2
     private     :: set_group_size_3
     private     :: set_group_size_init
+    ! Create a cartesian_mesh variable related to data saved in the cart_topology module.
+    public      :: mesh_save_default
 
-    ! ==== Public procedures ==== 
+    ! ==== Public procedures ====
     ! Initialise mesh information (second part)
     private     :: discretisation_init
 
@@ -133,7 +160,7 @@ contains
 !!    @param[out]   ierr        = error code
 !!    @param[out]   spec_comm   = mpi communicator used by the spectral part of the code (optional).
 !!    @param[in]    topology    = to choose the dimension of the mpi topology (if 0 then none) (optional).
-!! @details 
+!! @details
 !!        This subroutine initialzed the mpi topologic and returns the communicator
 !!    that will be used for all the spectral part of the code (ie everything except
 !!    the particles part). If needed, it also initialzed all the mpi context
@@ -146,7 +173,7 @@ subroutine cart_create(dims, ierr, spec_comm, topology)
     ! Input/Output
     integer, dimension(:), intent(in)   :: dims
     integer, intent(out)                :: ierr
-    integer, optional, intent(out)      :: spec_comm        
+    integer, optional, intent(out)      :: spec_comm
     integer, optional, intent(in)       :: topology
     ! Other local variables
     logical                 :: reorganisation                   ! to choose to reordered or not the processus rank.
@@ -180,7 +207,7 @@ subroutine cart_create(dims, ierr, spec_comm, topology)
         ! --- Creation of the cartesian topology ---
         reorganisation = .true.
         periods = .true.
-        if (size(dims)==2) then 
+        if (size(dims)==2) then
             nb_proc_dim = (/ 1, dims(1), dims(2) /)
         else if (size(dims)==3) then
             nb_proc_dim = dims
@@ -203,7 +230,7 @@ subroutine cart_create(dims, ierr, spec_comm, topology)
         ! --- Create 1D communicator ---
         ! Subdivision in 1D-subgrids and creation of communicator devoted to
         ! 1D-communication
-        ! Communication along X-axis 
+        ! Communication along X-axis
         remains_dim = (/.true., .false., .false. /)
         call mpi_cart_sub(cart_comm, remains_dim, X_comm, ierr)
         D_comm(1) = X_comm
@@ -211,7 +238,7 @@ subroutine cart_create(dims, ierr, spec_comm, topology)
         remains_dim = (/.false., .true., .false. /)
         call mpi_cart_sub(cart_comm, remains_dim, Y_comm, ierr)
         D_comm(2) = Y_comm
-        ! Communication along Z-axis 
+        ! Communication along Z-axis
         remains_dim = (/ .false., .false., .true. /)
         call mpi_cart_sub(cart_comm, remains_dim, Z_comm, ierr)
         D_comm(3) = Z_comm
@@ -220,7 +247,9 @@ subroutine cart_create(dims, ierr, spec_comm, topology)
         call mpi_comm_rank(cart_comm, cart_rank, ierr)
         do direction = 1, 3
             call mpi_comm_rank(D_comm(direction), D_rank(direction), ierr)
-            call mpi_cart_shift(D_comm(direction), 0, 1, neighbors(direction,1), neighbors(direction,2), ierr)
+            call mpi_cart_shift(D_comm(direction), 0, 1, neighbors(direction,-1), neighbors(direction,1), ierr)
+            call mpi_cart_shift(D_comm(direction), 0, 2, neighbors(direction,-2), neighbors(direction,2), ierr)
+            neighbors(direction,0) = D_rank(direction)
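+            ! neighbors(direction,-2:2) now stores the ranks at offsets
+            ! -2, -1, 0 (the current rank), +1 and +2 along this direction.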
         end do
         call mpi_cart_coords(cart_comm, cart_rank, 3, coord, ierr)
         coordYZ = (/ coord(2), coord(3) /)
@@ -230,10 +259,10 @@ subroutine cart_create(dims, ierr, spec_comm, topology)
         ! will be based.
         if (present(spec_comm)) then
             !> Rank numerotation in spectral communicator grow along first
-            !! direction first and then along the second, the opposite of mpi 
+            !! direction first and then along the second, the opposite of mpi
             !! rank numerotation. That is why processus are reoder and 2
             !! communicator are created.
-            !! Example with 4 processus 
+            !! Example with 4 processes
             !! coord    // mpi-cart rank    // spec rank
             !! (0,0,0)  // 0                // 0
             !! (0,1,0)  // 2                // 1
@@ -259,10 +288,10 @@ subroutine cart_create(dims, ierr, spec_comm, topology)
         if (present(spec_comm)) then
             spec_comm = MPI_COMM_WORLD
         end if
-        call MPI_COMM_RANK(MPI_COMM_WORLD,cart_rank,ierr)
+        call mpi_comm_rank(MPI_COMM_WORLD,cart_rank,ierr)
     end select
 
-    
+
     ! Print some minimal information about the topology
     if (cart_rank == 0) then
         write(*,'(a)') ''
@@ -279,71 +308,90 @@ subroutine cart_create(dims, ierr, spec_comm, topology)
 
 end subroutine cart_create
 
-!> Create the mesh structure associated to the topology 
-!!    @param[in]    Nx  = number of meshes along X
-!!    @param[in]    Ny  = number of meshes along X
-!!    @param[in]    Nz  = number of meshes along X
-!!    @param[in]    Lx  = number of meshes along X
-!!    @param[in]    Ly  = number of meshes along Y
-!!    @param[in]    Lz  = number of meshes along Z
+!> Create the mesh structure associated to the topology
+!!    @param[in]    Nx          = number of meshes along X
+!!    @param[in]    Ny          = number of meshes along Y
+!!    @param[in]    Nz          = number of meshes along Z
+!!    @param[in]    Lx          = domain length along X
+!!    @param[in]    Ly          = domain length along Y
+!!    @param[in]    Lz          = domain length along Z
+!!    @param[in]    verbosity   =  optional logical to deactivate verbosity (show messages about group size changes or not)
 !! @details
 !!    Initialise the mesh data associated to the mpi topology and used by the
 !!    particle solver
 !!    @author Jean-Baptiste Lagaert
-subroutine discretisation_create(Nx, Ny, Nz, Lx, Ly, Lz)
+subroutine discretisation_create(Nx, Ny, Nz, Lx, Ly, Lz, verbosity)
 
     implicit none
 
     ! Input/Output
-    integer, intent(in)     :: Nx, Ny, Nz
-    real(WP), intent(in)    ::Lx, Ly, Lz
+    integer, intent(in)             :: Nx, Ny, Nz
+    real(WP), intent(in)            :: Lx, Ly, Lz
+    logical, intent(in), optional   :: verbosity    ! To deactivate verbosity
+
+    ! Others
+    logical                 :: show_message
+
+    ! Init verbosity parameter
+    show_message = .true.
+    if(present(verbosity)) show_message = verbosity
 
     ! A cubic geometry : unitary lengh and 100 mesh points in each direction.
-    N(1) = Nx
-    N(2) = Ny
-    N(3) = Nz
+    mesh_sc%N(1) = Nx
+    mesh_sc%N(2) = Ny
+    mesh_sc%N(3) = Nz
 
-    length(1)= Lx
-    length(2)= Ly
-    length(3)= Lz
+    mesh_sc%length(1)= Lx
+    mesh_sc%length(2)= Ly
+    mesh_sc%length(3)= Lz
 
-    N_proc = N / nb_proc_dim
-    begin_proc = 1
-    end_proc = N_proc
+    mesh_sc%N_proc = mesh_sc%N / nb_proc_dim
+    N_proc = mesh_sc%N_proc
+    mesh_sc%relative_extend(:,1) = 1
+    mesh_sc%relative_extend(:,2) = mesh_sc%N_proc
 
     ! Adjust group size :
     call set_group_size_init()
     ! Finish init
     mesh_init = .false.
-    call discretisation_init()
+    call discretisation_init(show_message)
 
 end subroutine discretisation_create
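+
+! A minimal usage sketch (illustrative only: the 2x2 process grid and the unit
+! cube below are assumptions, not requirements):
+!     call cart_create((/2,2/), ierr)
+!     call discretisation_create(128, 128, 128, 1._WP, 1._WP, 1._WP, verbosity=.false.)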
 
 !> Defaut mesh setup
-!!    @author Jean-Baptiste Lagaert
+!! @author Jean-Baptiste Lagaert
+!!    @param[in]    verbosity   =  optional logical to deactivate verbosity (show messages about group size changes or not)
 !! @details
 !!    Initialise the mesh data associated to the mpi topology and used by the
 !!    particle solver to a default 100x100x100 mesh grid.
-subroutine discretisation_default()
+subroutine discretisation_default(verbosity)
 
-    implicit none
+    logical, intent(in), optional   :: verbosity    ! To deactivate verbosity
+
+    logical                 :: show_message
+
+    ! Init verbosity parameter
+    show_message = .true.
+    if(present(verbosity)) show_message = verbosity
 
     ! A cubic geometry : unitary lengh and 100 mesh points in each direction.
-    N = default_size
-    length = 1.
-    N_proc = N / nb_proc_dim
-    begin_proc = 1
-    end_proc = N_proc
+    mesh_sc%N = default_size
+    mesh_sc%length = 1.
+    mesh_sc%N_proc = mesh_sc%N / nb_proc_dim
+    N_proc = mesh_sc%N_proc
+    mesh_sc%relative_extend(:,1) = 1
+    mesh_sc%relative_extend(:,2) = mesh_sc%N_proc
 
     group_init = .false.
     call set_group_size_init()
     mesh_init = .false.
-    call discretisation_init()
-    
+    call discretisation_init(show_message)
+
 end subroutine discretisation_default
 
 !> To initialize some hidden mesh parameters
-!!    @author Jean-Baptiste Lagaert
+!! @author Jean-Baptiste Lagaert
+!!    @param[in]    verbosity   = optional logical used to deactivate verbosity
 !! @details
 !!        In order to deal well with the mpi topology, the data structure and the
 !!    mesh cut, some other parameters have to be initialised. Some are parameters
@@ -352,43 +400,36 @@ end subroutine discretisation_default
 !!    to avoid communication error or to allowed some optimization. For example, it
 !!    include variable used to create unique tag for the different mpi communication,
 !!    to gather line in group and to index these group.
-subroutine discretisation_init()
+subroutine discretisation_init(verbosity)
 
-    implicit none
+    logical, intent(in), optional   :: verbosity    ! To deactivate verbosity
 
     integer                 :: direction    ! direction (along X = 1, along Y = 2, along Z = 3)
     integer                 :: group_dir    ! direction "bis"
     integer, dimension(3,2) :: N_group      ! number of group on one processus along one direction
+    logical                 :: show_message
 
-    d_sc = length/(N)
+    mesh_sc%dx = mesh_sc%length/(mesh_sc%N)
+    show_message = .true.
+    if(present(verbosity)) show_message = verbosity
 
     ! Compute number of group
     ! Group of line along X
-    N_group(1,1) = N_proc(2)/group_size(1,1)
-    N_group(1,2) = N_proc(3)/group_size(1,2)
-    ! Group of line along Y
-    N_group(2,1) = N_proc(1)/group_size(2,1)
-    N_group(2,2) = N_proc(3)/group_size(2,2)
-    ! Group of line along Z
-    N_group(3,1) = N_proc(1)/group_size(3,1)
-    N_group(3,2) = N_proc(2)/group_size(3,2)
-! But not everything is done by groups !!
-! Group of line along X
-N_group(1,1) = N_proc(2)
-N_group(1,2) = N_proc(3)
-! Group of line along X
-N_group(2,1) = N_proc(1)
-N_group(2,2) = N_proc(3)
-! Group of line along X
-N_group(3,1) = N_proc(1)
-N_group(3,2) = N_proc(2)
-    
+    N_group(1,1) = mesh_sc%N_proc(2)/group_size(1,1)
+    N_group(1,2) = mesh_sc%N_proc(3)/group_size(1,2)
+    ! Group of line along Y
+    N_group(2,1) = mesh_sc%N_proc(1)/group_size(2,1)
+    N_group(2,2) = mesh_sc%N_proc(3)/group_size(2,2)
+    ! Group of line along Z
+    N_group(3,1) = mesh_sc%N_proc(1)/group_size(3,1)
+    N_group(3,2) = mesh_sc%N_proc(2)/group_size(3,2)
+
     ! tag_size = smallest power of ten to ensure tag_size > max ind_group
     do direction = 1,3
         tag_size(direction,:) = 1
         do group_dir = 1,2
             do while (N_group(direction, group_dir)/(10**tag_size(direction, group_dir))>1)
-                tag_size(direction, group_dir) = tag_size(direction, group_dir)+1 
+                tag_size(direction, group_dir) = tag_size(direction, group_dir)+1
             end do
         end do
     end do
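+    ! (For example, 250 groups along a direction lead to tag_size = 3, ie three
+    ! decimal digits are reserved for the group index inside the tag.)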
@@ -399,15 +440,18 @@ N_group(3,2) = N_proc(2)
     end do
     if (tag_rank == 1) tag_rank = 2
 
+    ! Default velocity mesh = same mesh as the scalar one
+    mesh_V = mesh_sc
+
     ! Print some information about mesh used
-    if(cart_rank==0) then
+    if((cart_rank==0).and.(show_message)) then
         write(*,'(a)') ''
-        if(mesh_init) then 
+        if(mesh_init) then
             write(*,'(6x,a,a24,a)') 'XXXXXX','group sized changed ','XXXXXX'
         else
             write(*,'(6x,a,a30,a)') '-- ','mesh size',' --'
-            write(*,'(6x,a,3(x,i0))') 'global size =',N
-            write(*,'(6x,a,3(x,i0))') 'local size =',N_proc
+            write(*,'(6x,a,3(x,i0))') 'global size =',mesh_sc%N
+            write(*,'(6x,a,3(x,i0))') 'local size =',mesh_sc%N_proc
         end if
         write(*,'(6x,a,2(x,i0))') 'group size along X =',group_size(1,:)
         write(*,'(6x,a,2(x,i0))') 'group size along Y =',group_size(2,:)
@@ -425,15 +469,34 @@ N_group(3,2) = N_proc(2)
 
 end subroutine discretisation_init
 
+!> To change velocity resolution
+!!    @param[in] Nx   = number of points along X
+!!    @param[in] Ny   = number of points along Y
+!!    @param[in] Nz   = number of points along Z
+subroutine discretisation_set_mesh_Velo(Nx, Ny, Nz)
+
+    integer, intent(in) :: Nx, Ny, Nz
+
+    mesh_V%N(1) = Nx
+    mesh_V%N(2) = Ny
+    mesh_V%N(3) = Nz
+
+    mesh_V%N_proc = mesh_V%N / nb_proc_dim
+    mesh_V%relative_extend(:,2) = mesh_V%N_proc
+
+    mesh_V%dx = mesh_V%length/(mesh_V%N)
+
+end subroutine discretisation_set_mesh_Velo
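+
+! Illustrative call (a sketch, assuming discretisation_create has already set
+! up the scalar mesh): a velocity field twice finer than the scalar would be
+! declared with
+!     call discretisation_set_mesh_Velo(2*mesh_sc%N(1), 2*mesh_sc%N(2), 2*mesh_sc%N(3))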
+
 !> Compute unique tag for mpi message by concatenation of position (ie line coordinate), proc_gap and unique Id
 !!    @param[in]    ind_group   = indice of current group of line
 !!    @param[in]    tag_param   = couple of int unique for each message (used to create the tag)
 !!    @param[in]    direction   = current direction
 !!    @param[in]    proc_gap    = number of processus between the sender and the receiver
 !!    @return       tag         = unique tag: at each message send during an iteration have a different tag
-!!@details 
+!!@details
 !!     Use this procedure to compute tag in order to communicate with a distant processus or/and when
-!!    you will send more then two message. It produce longer tag compute_tag_NP because rather tyo use 0/1 it 
+!!    you will send more than two messages. It produces longer tags than compute_tag_NP because, rather than using 0/1, it
 !!    put the gap between the sender and the receiver (ie the number of processus between them) in the tag.
 !!    Using these two procedure allow to obtain more unique tag for communication.
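+!!    As a worked example based only on the lines below: tag_param(1)=1,
+!!    direction=2, tag_rank=2 and proc_gap=3 give tag = (1*10+2)*10**3 + 3*10
+!!    = 12030, before the group indices are appended.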
 function compute_tag_gap(ind_group, tag_param, direction,proc_gap) result(tag)
@@ -454,7 +517,7 @@ function compute_tag_gap(ind_group, tag_param, direction,proc_gap) result(tag)
     abs_proc_gap = max(abs(proc_gap),1)
     tag = (tag_param(1)*10+direction)*(10**(tag_rank+1))
     if (proc_gap>=0) then
-        tag = tag + proc_gap*10 
+        tag = tag + proc_gap*10
     else
         tag = tag - proc_gap*10 +1
     end if
@@ -468,7 +531,7 @@ function compute_tag_gap(ind_group, tag_param, direction,proc_gap) result(tag)
         !print*, 'tag too big - regenerated'
         tag = (tag_param(1))*(10**(tag_rank+1))
         if (proc_gap>=0) then
-            tag = tag + proc_gap*10 
+            tag = tag + proc_gap*10
         else
             tag = tag - proc_gap*10 +1
         end if
@@ -479,7 +542,7 @@ function compute_tag_gap(ind_group, tag_param, direction,proc_gap) result(tag)
             !print*, 'tag very too big - regenerated'
             tag = (tag_param(1))*(10**(tag_rank+1))
             if (proc_gap>=0) then
-                tag = tag + proc_gap*10 
+                tag = tag + proc_gap*10
             else
                 tag = tag - proc_gap*10 +1
             end if
@@ -502,12 +565,12 @@ end function compute_tag_gap
 !!    @param[in]    ind_group   = indice of current group of line
 !!    @param[in]    tag_param   = couple of int unique for each message (used to create the tag)
 !!    @param[in]    direction   = current direction
-!!    @return       tag_table   = unique couple tag: use tag_table(1) for mesage to previous proc. (or first 
+!!    @return       tag_table   = unique couple tag: use tag_table(1) for message to previous proc. (or first
 !!                                message ) and tag_table(2) for the other message.
-!!@details 
+!!@details
 !!     Use this procedure to compute tag for communication with your neighbor or when only two message are send:
 !!    it produce smaller tag then compute_tag_gap because the gap between sender and receiver are replaced by 1,
-!!    for communicate with previous processus (or first of the two message), or 0, for communication with next 
+!!    for communication with the previous process (or the first of the two messages), or 0, for communication with the next
 !!    processus (or the second message). It allow to reuse some unique Id.
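+!!    A worked example from the code below: tag_param(1)=1 and direction=2 give
+!!    tag_table = (/120, 121/) before the shift by tag_size and the addition of
+!!    ind_group(1)-1.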
 function compute_tag_NP(ind_group, tag_param, direction) result (tag_table)
 
@@ -524,9 +587,9 @@ function compute_tag_NP(ind_group, tag_param, direction) result (tag_table)
 
     tag_table(2) = (tag_param(1)*10+direction)*10
     tag_table(1) = tag_table(2)
-    
+
     tag_table(2) = tag_table(2) +1
-   
+
     tag_table(2) = (tag_table(2)*(10**tag_size(direction,1)))+(ind_group(1)-1)
     tag_table(1) = (tag_table(1)*(10**tag_size(direction,1)))+(ind_group(1)-1)
 
@@ -551,23 +614,29 @@ end function compute_tag_NP
 
 !> Adjust the private variable "group_size": line are gathering on group of same
 !! size undependant from the direction
-!!    @param[in]    s       =  integer such as group will gather sxs lines
-!!    @param[in]    init    =  logical to said if it is a default init of group_size
+!!    @param[in]    s           =  integer such that groups will gather s x s lines
+!!    @param[in]    init        =  logical to say if it is a default init of group_size
+!!    @param[in]    verbosity   =  logical to deactivate verbosity (show messages about group size changes or not)
 !! @details
 !!    Create group of line s x s along the three direction.
-subroutine set_group_size_1(s, init)
+subroutine set_group_size_1(s, init, verbosity)
 
     implicit none
     integer, intent(in)             :: s
     logical, intent(in), optional   :: init
+    logical, intent(in), optional   :: verbosity
 
     if (.not.mesh_init) then
         group_size = s
         ! And now group size is initialized !
         group_init = .true.
     else
-        if (all(mod(N_proc,s)==0)) group_size = s
-        call discretisation_init()
+        if (all(mod(mesh_sc%N_proc,s)==0)) group_size = s
+        if (present(verbosity)) then
+            call discretisation_init(verbosity=verbosity)
+        else
+            call discretisation_init()
+        end if
     end if
 
     if (present(init)) call set_group_size(init)
@@ -580,13 +649,15 @@ end subroutine set_group_size_1
 !!    @param[in]    s1      =  integer such as group will gather s1 line along first remaining direction
 !!    @param[in]    s2      =  integer such as group will gather s1 line along second remaining direction
 !!    @param[in]    init    =  logical to said if it is a default init of group_size
+!!    @param[in]    verbo   =  logical to deactivate verbosity (show messages about group size changes or not)
 !! @details
 !!    Created group will gather s1 x s2 lines
-subroutine set_group_size_1x2(s1, s2, init)
+subroutine set_group_size_1x2(s1, s2, init, verbo)
 
     implicit none
     integer, intent(in)             :: s1, s2
     logical, intent(in), optional   :: init
+    logical, intent(in), optional   :: verbo
 
     if (.not. mesh_init) then
         group_size(:,1) = s1
@@ -594,9 +665,13 @@ subroutine set_group_size_1x2(s1, s2, init)
         ! And now group size is initialized !
         group_init = .true.
     else
-        if (all(mod(N_proc,s1)==0)) group_size(:,1) = s1
-        if (all(mod(N_proc,s2)==0)) group_size(:,2) = s2
-        call discretisation_init()
+        if (all(mod(mesh_sc%N_proc,s1)==0)) group_size(:,1) = s1
+        if (all(mod(mesh_sc%N_proc,s2)==0)) group_size(:,2) = s2
+        if (present(verbo)) then
+            call discretisation_init(verbosity=verbo)
+        else
+            call discretisation_init()
+        end if
     end if
 
     if (present(init)) call set_group_size(init)
@@ -610,23 +685,29 @@ end subroutine set_group_size_1x2
 !!    @param[in]    sY =  integer such as group of lines along Y will gather sY x sY lines
 !!    @param[in]    sZ =  integer such as group of lines along Z will gather sZ x sX lines
 !!    @param[in]    init    =  logical to said if it is a default init of group_size
-subroutine set_group_size_3(sX, sY, sZ, init)
+!!    @param[in]    verbo   =  logical to deactivate verbosity (show messages about group size changes or not)
+subroutine set_group_size_3(sX, sY, sZ, init, verbo)
 
     implicit none
     integer, intent(in)             :: sX, sY, sZ
     logical, intent(in), optional   :: init
+    logical, intent(in), optional   :: verbo
 
     if (.not.mesh_init) then
-        group_size(1,:) = sX
-        group_size(2,:) = sY
-        group_size(3,:) = sZ
+        group_size(1,:) = (/sY, sZ/)
+        group_size(2,:) = (/sX, sZ/)
+        group_size(3,:) = (/sX, sY/)
         ! And now group size is initialized !
         group_init = .true.
     else
-        if (all(mod(N_proc(2:3),sX)==0)) group_size(1,:) = sX
-        if ((mod(N_proc(1),sY)==0).and.(mod(N_proc(3),sY)==0)) group_size(2,:) = sY
-        if ((mod(N_proc(1),sZ)==0).and.(mod(N_proc(2),sZ)==0)) group_size(3,:) = sZ
-        call discretisation_init()
+        if (all(mod(mesh_sc%N_proc(2:3),sX)==0)) group_size(1,:) = sX
+        if ((mod(mesh_sc%N_proc(1),sY)==0).and.(mod(mesh_sc%N_proc(3),sY)==0)) group_size(2,:) = sY
+        if ((mod(mesh_sc%N_proc(1),sZ)==0).and.(mod(mesh_sc%N_proc(2),sZ)==0)) group_size(3,:) = sZ
+        if (present(verbo)) then
+            call discretisation_init(verbosity=verbo)
+        else
+            call discretisation_init()
+        end if
     end if
 
     if (present(init)) call set_group_size(init)
@@ -652,11 +733,13 @@ subroutine set_group_size_init(init)
 
     if (.not.group_init) then
         ! Setup the size of line group to a default value
-        if (all(mod(N_proc,5)==0)) then
+        if (all(mod(mesh_sc%N_proc,8)==0)) then
+            group_size = 8
+        else if (all(mod(mesh_sc%N_proc,5)==0)) then
             group_size = 5
-        else if (all(mod(N_proc,4)==0)) then
+        else if (all(mod(mesh_sc%N_proc,4)==0)) then
             group_size = 4
-        else if (all(mod(N_proc,2)==0)) then
+        else if (all(mod(mesh_sc%N_proc,2)==0)) then
             group_size = 2
         else
             group_size = 1
@@ -664,12 +747,14 @@ subroutine set_group_size_init(init)
         ! And now group size is initialized !
         group_init = .true.
     else
-        domain_size(1,:) = (/N_proc(2), N_proc(3)/)
-        domain_size(2,:) = (/N_proc(1), N_proc(3)/)
-        domain_size(3,:) = (/N_proc(1), N_proc(2)/)
+        domain_size(1,:) = (/mesh_sc%N_proc(2), mesh_sc%N_proc(3)/)
+        domain_size(2,:) = (/mesh_sc%N_proc(1), mesh_sc%N_proc(3)/)
+        domain_size(3,:) = (/mesh_sc%N_proc(1), mesh_sc%N_proc(2)/)
 
         where (mod(domain_size,group_size)/=0)
-            where(mod(domain_size,5)==0)
+            where(mod(domain_size,8)==0)
+                group_size=8
+            elsewhere(mod(domain_size,5)==0)
                 group_size=5
             elsewhere(mod(domain_size,4)==0)
                 group_size=4
@@ -683,5 +768,23 @@ subroutine set_group_size_init(init)
 
 end subroutine set_group_size_init
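+! (For example, with mesh_sc%N_proc = (/40,40,40/), the default cascade above
+! selects group_size = 8, the first divisor tested.)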
 
+
+!> Save data about the cartesian mesh created in the cart_topology module
+!>    @param[out]   mesh    = variable of type cartesian_mesh where the data about the mesh are saved
+subroutine mesh_save_default(mesh)
+
+    implicit none
+
+    ! Input/Output
+    type(cartesian_mesh), intent(out)       :: mesh
+    ! Other local variables
+    !integer                                 :: direction    ! integer matching to a direction (X, Y or Z)
+
+    mesh = mesh_sc
+
+end subroutine mesh_save_default
+
+
+
 end module cart_topology
 !> @}
diff --git a/HySoP/src/scalesInterface/output/vtkxml.f90 b/HySoP/src/scalesInterface/output/vtkxml.f90
index 41860b754..bbbe878d7 100644
--- a/HySoP/src/scalesInterface/output/vtkxml.f90
+++ b/HySoP/src/scalesInterface/output/vtkxml.f90
@@ -33,7 +33,7 @@
 module vtkxml
 
     use precision_tools
-    use cart_mesh
+    use cart_mesh_tools
 
     implicit none
 
@@ -236,7 +236,6 @@ end subroutine vtkxml_init_field_basic
 subroutine vtkxml_init_field_iodata(io_info, tag)
 
     use mpi
-    use cart_mesh
 
     ! Input/Output
     type(io_data), intent(in)                   :: io_info
@@ -323,8 +322,8 @@ subroutine vtkxml_scalar(tag, values, f_name)
         & field_data(tag)%piece_extend(rank3D,3,1), &
         & field_data(tag)%piece_extend(rank3D,3,2),'"'
     write(44, '(4x,a)') 'Origin="0 0 0"'
-    write(44, '(4x,a,f6.4,x,f6.4,x,f6.4,a)') 'Spacing="', field_data(tag)%mesh%d_space(1), &
-        & field_data(tag)%mesh%d_space(2), field_data(tag)%mesh%d_space(3), '">'
+    write(44, '(4x,a,f6.4,x,f6.4,x,f6.4,a)') 'Spacing="', field_data(tag)%mesh%dx(1), &
+        & field_data(tag)%mesh%dx(2), field_data(tag)%mesh%dx(3), '">'
     write(44, '(4x,a,i0,x,i0,x,i0,x,i0,x,i0,x,i0,a)') '<Piece Extent="', &
         & field_data(tag)%mesh%absolute_extend(1,1), &
         & field_data(tag)%mesh%absolute_extend(1,2), &
@@ -392,8 +391,8 @@ subroutine parallel_file_description(tag)
             & 1, field_data(tag)%mesh%N(1), 1, field_data(tag)%mesh%N(2), 1, field_data(tag)%mesh%N(3), '"'
     write(45, '(4x,a)') 'GhostLevel="0"'
     write(45, '(4x,a)') 'Origin="0 0 0"'
-    write(45, '(4x,a,f7.5,x,f7.5,x,f7.5,a)') 'Spacing="', field_data(tag)%mesh%d_space(1), &
-        & field_data(tag)%mesh%d_space(2), field_data(tag)%mesh%d_space(3), '">'
+    write(45, '(4x,a,f7.5,x,f7.5,x,f7.5,a)') 'Spacing="', field_data(tag)%mesh%dx(1), &
+        & field_data(tag)%mesh%dx(2), field_data(tag)%mesh%dx(3), '">'
     ! Write information about data field
     write(45,'(6x,a,a,a)') '<PPointData Scalars="', trim(field_data(tag)%f_name),'">'
     write(45,'(8x,a,a,a)') '<PDataArray type="Float64" Name="', trim(field_data(tag)%f_name),'" NumberOfComponents="1" >'
diff --git a/HySoP/src/scalesInterface/output/vtkxml_bin.f90 b/HySoP/src/scalesInterface/output/vtkxml_bin.f90
index 4c7adb618..4f6dc68af 100644
--- a/HySoP/src/scalesInterface/output/vtkxml_bin.f90
+++ b/HySoP/src/scalesInterface/output/vtkxml_bin.f90
@@ -34,7 +34,7 @@
 module vtkxml_bin
 
     use precision_tools
-    use cart_mesh
+    use cart_mesh_tools
 
     implicit none
 
@@ -296,8 +296,8 @@ subroutine parallel_scalar(tag, values, f_name)
     write(buffer_char, '(4x,a)') 'Origin="0 0 0"'
     write(44) trim(buffer_char)
     write(44) char(10)
-    write(buffer_char, '(4x,a,f6.4,x,f6.4,x,f6.4,a)') 'Spacing="', field_data(tag)%mesh%d_space(1), &
-        & field_data(tag)%mesh%d_space(2), field_data(tag)%mesh%d_space(3), '">'
+    write(buffer_char, '(4x,a,f6.4,x,f6.4,x,f6.4,a)') 'Spacing="', field_data(tag)%mesh%dx(1), &
+        & field_data(tag)%mesh%dx(2), field_data(tag)%mesh%dx(3), '">'
     write(44) trim(buffer_char)
     write(44) char(10)
     write(buffer_char, '(4x,a,i0,x,i0,x,i0,x,i0,x,i0,x,i0,a)') '<Piece Extent="', &
@@ -394,8 +394,8 @@ subroutine parallel_file_description(tag)
             & 1, field_data(tag)%mesh%N(1), 1, field_data(tag)%mesh%N(2), 1, field_data(tag)%mesh%N(3), '"'
     write(45, '(4x,a)') 'GhostLevel="0"'
     write(45, '(4x,a)') 'Origin="0 0 0"'
-    write(45, '(4x,a,f7.5,x,f7.5,x,f7.5,a)') 'Spacing="', field_data(tag)%mesh%d_space(1), &
-        & field_data(tag)%mesh%d_space(2), field_data(tag)%mesh%d_space(3), '">'
+    write(45, '(4x,a,f7.5,x,f7.5,x,f7.5,a)') 'Spacing="', field_data(tag)%mesh%dx(1), &
+        & field_data(tag)%mesh%dx(2), field_data(tag)%mesh%dx(3), '">'
     ! Write information about data field
     write(45,'(6x,a,a,a)') '<PPointData Scalars="', trim(field_data(tag)%f_name),'">'
     write(45,'(8x,a,a,a)') '<PDataArray type="Float64" Name="', trim(field_data(tag)%f_name),'" NumberOfComponents="1" >'
diff --git a/HySoP/src/scalesInterface/particles/advec.f90 b/HySoP/src/scalesInterface/particles/advec.f90
index 6d81d34a3..ef7e703ed 100644
--- a/HySoP/src/scalesInterface/particles/advec.f90
+++ b/HySoP/src/scalesInterface/particles/advec.f90
@@ -1,3 +1,4 @@
+!USEFORTEST advec
 !> @addtogroup part
 !! @{
 !------------------------------------------------------------------------------
@@ -32,7 +33,11 @@ module advec
     !> dimensionnal splitting (eg classical, Strang or particle)
     character(len=str_short), private   :: dim_splitting
     !> Group size along current direction
-    integer, private, dimension(2)  :: gsX, gsY, gsZ
+    integer, protected, dimension(2)  :: gsX, gsY, gsZ
+    !> Indices of the transverse directions
+    integer, protected                :: gp_dir1, gp_dir2
+    !> Index of the current direction
+    integer, protected                :: line_dir
 
 
     ! ===== Public procedures =====
@@ -45,8 +50,15 @@ module advec
     public                                          :: advec_step_Torder1   ! advec the scalar field during a time step.
     public                                          :: advec_step_Torder2   ! advec the scalar field during a time step.
 
+!TODO make the pointers protected
+!    ! Remeshing formula
+!    procedure(AC_remesh), pointer, protected        :: advec_remesh         => null()
+!    ! Particle velocity initialisation
+!    procedure(AC_init_p_V), pointer, protected      :: advec_init_velo     => null()
     ! Remeshing formula
-    procedure(AC_remesh), pointer, private          :: advec_remesh         => null()
+    procedure(AC_remesh), pointer, public           :: advec_remesh         => null()
+    ! Particle velocity initialisation
+    procedure(AC_init_p_V), pointer, public         :: advec_init_velo     => null()
 
 contains
 
@@ -122,12 +134,12 @@ subroutine advec_init(order, stab_coeff, verbosity, dim_split)
             advec_remesh => AC_remesh_lambda_group ! or Xremesh_O4
         case('p_L2')
             advec_remesh => AC_remesh_limit_lambda_group    ! limited and corrected lambda 2
-        case('p_M4')
-            advec_remesh => AC_remesh_Mprime_group ! Xremesh_Mprime4
         case('p_M6')
             advec_remesh => AC_remesh_Mprime_group ! Xremesh_Mprime6
         case('p_M8')
-            advec_remesh => AC_remesh_Mprime_group ! Xremesh_Mprime8
+            advec_remesh => AC_remesh_Mprime_group ! Xremesh_Mprime8
+        case('p_L4')
+            advec_remesh => AC_remesh_Mprime_group ! Lambda 4,4
         case default
             advec_remesh => AC_remesh_lambda_group ! or Xremesh_O2
     end select
@@ -144,7 +156,41 @@ subroutine advec_init(order, stab_coeff, verbosity, dim_split)
 end subroutine advec_init
 
 
-!> Solve advection equation - order 1 in time (classic dimensional splitting)
+!> Adjust 1D solver to advect scalar field along X
+subroutine advec_setup_alongX()
+    use advecX, only : advecX_init_group
+    use advec_common    ! Some procedures common to advection along all directions
+    call AC_remesh_setup_alongX()
+    advec_init_velo => advecX_init_group
+    gp_dir1 = 2
+    gp_dir2 = 3
+    line_dir = 1
+end subroutine advec_setup_alongX
+
+!> Adjust 1D solver to advect scalar field along Y
+subroutine advec_setup_alongY()
+    use advecY, only : advecY_init_group
+    use advec_common    ! Some procedures common to advection along all directions
+    call AC_remesh_setup_alongY()
+    advec_init_velo => advecY_init_group
+    line_dir = 2
+    gp_dir1 = 1
+    gp_dir2 = 3
+end subroutine advec_setup_alongY
+
+!> Adjust 1D solver to advect scalar field along Z
+subroutine advec_setup_alongZ()
+    use advecZ, only : advecZ_init_group
+    use advec_common    ! Some procedures common to advection along all directions
+    call AC_remesh_setup_alongZ()
+    advec_init_velo => advecZ_init_group
+    gp_dir1 = 1
+    gp_dir2 = 2
+    line_dir = 3
+end subroutine advec_setup_alongZ
+
+
+!> Solve advection equation - order 1 in time (classical dimensional splitting)
 !!    @param[in]        dt          = time step
 !!    @param[in]        Vx          = velocity along x (could be discretised on a bigger mesh then the scalar)
 !!    @param[in]        Vy          = velocity along y
@@ -157,9 +203,12 @@ subroutine advec_step_Torder1(dt, Vx, Vy, Vz, scal)
     real(WP), dimension(:,:,:), intent(in)      :: Vx, Vy, Vz
     real(WP), dimension(:,:,:), intent(inout)   :: scal
 
-    call advecX_calc_no_com(dt, Vx, scal)
-    call advecY_calc(dt, Vy, scal)
-    call advecZ_calc(dt, Vz, scal)
+    call advec_setup_alongX()
+    call advec_X_basic_no_com(dt, gsX, Vx, scal)
+    call advec_setup_alongY()
+    call advec_1D_basic(dt, gsY, Vy, scal)
+    call advec_setup_alongZ()
+    call advec_1D_basic(dt, gsZ, Vz, scal)
 
 end subroutine advec_step_Torder1
 
@@ -177,82 +226,29 @@ subroutine advec_step_Torder2(dt, Vx, Vy, Vz, scal)
     real(WP), dimension(:,:,:), intent(in)      :: Vx, Vy, Vz
     real(WP), dimension(:,:,:), intent(inout)   :: scal
 
-    call advecX_calc_no_com(dt/2.0, Vx, scal)
-    call advecY_calc(dt/2.0, Vy, scal)
-    call advecZ_calc(dt/2.0, Vz, scal)
-    call advecZ_calc(dt/2.0, Vz, scal)
-    call advecY_calc(dt/2.0, Vy, scal)
-    call advecX_calc_no_com(dt/2.0, Vx, scal)
+    call advec_setup_alongX()
+    call advec_X_basic_no_com(dt/2.0, gsX, Vx, scal)
+    call advec_setup_alongY()
+    call advec_1D_basic(dt/2.0, gsY, Vy, scal)
+    call advec_setup_alongZ()
+    call advec_1D_basic(dt/2.0, gsZ, Vz, scal)
+    call advec_1D_basic(dt/2.0, gsZ, Vz, scal)
+    call advec_setup_alongY()
+    call advec_1D_basic(dt/2.0, gsY, Vy, scal)
+    call advec_setup_alongX()
+    call advec_X_basic_no_com(dt/2.0, gsX, Vx, scal)
 
 end subroutine advec_step_Torder2
 
 
-!> Scalar advection (this procedure call the right solver, depending on the simulation setup)
+!> Scalar advection along one direction - variant for cases with no communication
 !!    @param[in]        dt          = time step
-!!    @param[in]        Vx          = velocity along X (could be discretised on a bigger mesh then the scalar)
+!!    @param[in]        gs          = size of the work item along transverse direction
+!!    @param[in]        V_comp      = velocity along X (could be discretised on a bigger mesh than the scalar)
 !!    @param[in,out]    scal3D      = scalar field to advect
-subroutine advecX_calc(dt, Vx, scal3D)
-
-    use advecX, only : advecX_init_group ! Procedure specific to advection along X
-    use advec_common    ! Some procedures common to advection along all directions
-    use advec_variables ! contains info about solver parameters and others.
-    use cart_topology   ! Description of mesh and of mpi topology
-
-    ! Input/Output
-    real(WP), intent(in)                                                :: dt
-    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vx
-    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal3D
-    ! Other local variables
-    integer, parameter                                  :: direction =1 ! current direction
-    integer                                             :: j,k          ! indice of the currend mesh point
-    integer, dimension(2)                               :: ind_group    ! indice of the currend group of line (=(i,k) by default)
-    real(WP),dimension(N_proc(direction),gsX(1),gsX(2)) :: p_pos_adim   ! adimensionned particles position
-    real(WP),dimension(N_proc(direction),gsX(1),gsX(2)) :: p_V          ! particles velocity
-
-    ! Allocate send_ind_min/max
-    if(allocated(send_group_min)) deallocate(send_group_min)
-    allocate(send_group_min(group_size(direction,1),group_size(direction,2)))
-    if(allocated(send_group_max)) deallocate(send_group_max)
-    allocate(send_group_max(group_size(direction,1),group_size(direction,2)))
-
-    ! Initialise the pointer for optimized remeshing
-    call AC_setup_alongX()
-
-    ind_group = 0
-
-    do k = 1, N_proc(3), gsX(2)
-        ind_group(2) = ind_group(2) + 1
-        ind_group(1) = 0
-        do j = 1, N_proc(2), gsX(1)
-            ind_group(1) = ind_group(1) + 1
-
-            ! ===== Init particles =====
-            call advecX_init_group(Vx, j, k, gsX, p_pos_adim, p_V)
-
-            ! ===== Advection =====
-            ! -- Compute velocity (with a RK2 scheme) --
-            call AC_velocity_interpol_group(dt, direction, gsX, ind_group, p_pos_adim, p_V)
-
-            ! -- Advec particles --
-            p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
-
-            ! ===== Remeshing =====
-            call advec_remesh(direction, ind_group, gsX, p_pos_adim, p_V, j, k, scal3D, dt)
-
-        end do
-    end do
-
-    deallocate(send_group_min)
-    deallocate(send_group_max)
-
-end subroutine advecX_calc
-
-
-!> Scalar advection along X - variant for cases with no communication
-!!    @param[in]        dt          = time step
-!!    @param[in]        Vx          = velocity along X (could be discretised on a bigger mesh then the scalar)
-!!    @param[in,out]    scal3D      = scalar field to advect
-subroutine advecX_calc_no_com(dt, Vx, scal3D)
+!! @details
+!!   Works only for direction = X. Basic (and very simple) remeshing just has
+!! to be added for the other directions.
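+!!
+!! A sketch of the RK2 (midpoint) update performed below, in adimensionned
+!! coordinates (the names x, v and dx are illustrative, not actual variables):
+!!     x_mid = x + 0.5*dt*v(x)/dx        (first AC_get_p_pos_adim call)
+!!     v_mid = v interpolated at x_mid   (AC_interpol_lin_no_com)
+!!     x_new = x + dt*v_mid/dx           (second AC_get_p_pos_adim call)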
+subroutine advec_X_basic_no_com(dt, gs, V_comp, scal3D)
 
     use advecX          ! Procedure specific to advection along X
     use advec_common    ! Some procedures common to advection along all directions
@@ -260,163 +256,165 @@ subroutine advecX_calc_no_com(dt, Vx, scal3D)
     use cart_topology   ! Description of mesh and of mpi topology
 
     ! Input/Output
-    real(WP), intent(in)                                                :: dt
-    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vx
-    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal3D
+    real(WP), intent(in)                          :: dt
+    integer, dimension(2), intent(in)             :: gs
+    real(WP), dimension(:,:,:), intent(in)        :: V_comp
+    real(WP), dimension(:,:,:), intent(inout)     :: scal3D
     ! Other local variables
-    integer, parameter                                  :: direction =1 ! current direction
     integer                                             :: j,k          ! indice of the currend mesh point
     integer, dimension(2)                               :: ind_group    ! indice of the currend group of line (=(i,k) by default)
-    real(WP),dimension(N_proc(direction),gsX(1),gsX(2)) :: p_pos_adim   ! adimensionned particles position
-    real(WP),dimension(N_proc(direction),gsX(1),gsX(2)) :: p_V          ! particles velocity
+    real(WP),dimension(mesh_sc%N_proc(line_dir),gs(1),gs(2))  :: p_pos_adim   ! adimensionned particles position
+    real(WP),dimension(mesh_sc%N_proc(line_dir)+1,gs(1),gs(2)):: p_V          ! particles velocity
 
     ind_group = 0
 
-    do k = 1, N_proc(3), gsX(2)
+! Works only for the X direction - add no_com remeshing along Y and Z to use it
+! for advection along these directions.
+    line_dir = 1
+    gp_dir1 = 2
+    gp_dir2 = 3
+
+    do k = 1, mesh_sc%N_proc(gp_dir2), gs(2)
         ind_group(2) = ind_group(2) + 1
         ind_group(1) = 0
-        do j = 1, N_proc(2), gsX(1)
+        do j = 1, mesh_sc%N_proc(gp_dir1), gs(1)
             ind_group(1) = ind_group(1) + 1
 
             ! ===== Init particles =====
-            call advecX_init_group(Vx, j, k, gsX, p_pos_adim, p_V)
+            ! p_pos is used to store velocity at grid point
+            call advec_init_velo(V_comp, j, k, gs, p_pos_adim)
+            ! p_V = position at the middle point for the RK2 scheme
+            call AC_get_p_pos_adim(p_V, p_pos_adim, 0.5_WP*dt, mesh_sc%dx(line_dir), mesh_sc%N_proc(line_dir))
 
             ! ===== Advection =====
-            ! -- Compute velocity (with a RK2 scheme) --
-            call AC_velocity_interpol_no_com(dt, direction, gsX, p_pos_adim, p_V)
-
-            ! -- Advec particles --
-            p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+            ! -- Compute velocity (with a RK2 scheme): p_V = velocity at middle point position --
+            ! Note that p_pos is used as velocity component storage
+            call AC_interpol_lin_no_com(line_dir, gs, p_pos_adim, p_V)
+            ! p_v = velocity at middle point position
+            ! -- Push particles --
+            call AC_get_p_pos_adim(p_pos_adim, p_V, dt, mesh_sc%dx(line_dir), mesh_sc%N_proc(line_dir))
+            ! Now p_pos = particle position and p_V = particle velocity
 
             ! ===== Remeshing =====
-            call advecX_remesh_no_com(ind_group, gsX, p_pos_adim, p_V, j, k, scal3D, dt)
+            call advecX_remesh_no_com(ind_group, gs, p_pos_adim, p_V, j, k, scal3D, dt)
 
         end do
     end do
 
-end subroutine advecX_calc_no_com
+end subroutine advec_X_basic_no_com
 
 
-!> Scalar advection along Y (this procedure call the right solver, depending on the simulation setup)
+!> Scalar advection along one direction (this procedure calls the right solver, depending on the simulation setup)
 !!    @param[in]        dt          = time step
-!!    @param[in]        Vy          = velocity along y (could be discretised on a bigger mesh then the scalar)
+!!    @param[in]        gs          = size of the work item along transverse direction
+!!    @param[in]        V_comp      = velocity component
 !!    @param[in,out]    scal3D      = scalar field to advect
-subroutine advecY_calc(dt, Vy, scal3D)
+subroutine advec_1D_basic(dt, gs, V_comp, scal3D)
 
-    use advecY, only : advecY_init  ! Procedure specific to advection along Y
-    use advec_common    ! Some procedures common to advection along all directions
+    use advecX, only : advecX_init_group    ! procedure devoted to advection along X
+    use advecY, only : advecY_init_group    ! procedure devoted to advection along Y
+    use advecZ, only : advecZ_init_group    ! procedure devoted to advection along Z
     use advec_variables ! contains info about solver parameters and others.
     use cart_topology   ! Description of mesh and of mpi topology
+    use advec_common    ! some procedures common to advection along all directions
 
     ! Input/Output
-    real(WP), intent(in)                                                :: dt
-    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vy
-    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal3D
+    real(WP), intent(in)                          :: dt
+    integer, dimension(2), intent(in)             :: gs
+    real(WP), dimension(:,:,:), intent(in)        :: V_comp
+    real(WP), dimension(:,:,:), intent(inout)     :: scal3D
     ! Other local variables
-    integer, parameter                                  :: direction =2 ! current direction
-    integer                                             :: i,k          ! indice of the currend mesh point
-    integer, dimension(2)                               :: ind_group    ! indice of the currend group of line (=(i,k) by default)
-    real(WP),dimension(N_proc(direction),gsY(1),gsY(2)) :: p_pos_adim   ! adimensionned particles position
-    real(WP),dimension(N_proc(direction),gsY(1),gsY(2)) :: p_V          ! particles velocity
-
-    ! Allocate send_ind_min/max
-    if(allocated(send_group_min)) deallocate(send_group_min)
-    allocate(send_group_min(group_size(direction,1),group_size(direction,2)))
-    if(allocated(send_group_max)) deallocate(send_group_max)
-    allocate(send_group_max(group_size(direction,1),group_size(direction,2)))
-
-    ! Initialise the pointer for optimized remeshing
-    call AC_setup_alongY()
+    integer                                       :: i,j          ! index of the current mesh point
+    integer, dimension(2)                         :: ind_group    ! index of the current group of lines (=(i,j) by default)
+    real(WP), dimension(mesh_sc%N_proc(line_dir),gs(1),gs(2))  :: p_pos_adim ! adimensionned particles position
+    real(WP), dimension(mesh_sc%N_proc(line_dir)+1,gs(1),gs(2)):: p_V        ! particles velocity
 
     ind_group = 0
 
-    do k = 1, N_proc(3), gsY(2)
+    do j = 1, mesh_sc%N_proc(gp_dir2), gs(2)
         ind_group(2) = ind_group(2) + 1
         ind_group(1) = 0
-        do i = 1, N_proc(1), gsY(1)
+        do i = 1, mesh_sc%N_proc(gp_dir1), gs(1)
             ind_group(1) = ind_group(1) + 1
 
             ! ===== Init particles =====
-            call advecY_init(Vy, i, k, gsY, p_pos_adim, p_V)
+            call advec_init_velo(V_comp, i, j, gs, p_pos_adim)
+            ! p_pos is used to store velocity at grid point
+            call AC_get_p_pos_adim(p_V, p_pos_adim, 0.5_WP*dt, mesh_sc%dx(line_dir), mesh_sc%N_proc(line_dir))
+            ! p_V = position at the middle point for the RK2 scheme
 
             ! ===== Advection =====
             ! -- Compute velocity (with a RK2 scheme) --
-            call AC_velocity_interpol_group(dt, direction, gsY, ind_group, p_pos_adim, p_V)
-
-            ! -- Advec particles --
-            p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+            ! Note that p_pos is used as velocity component storage
+            call AC_interpol_lin(line_dir, gs, ind_group, p_pos_adim, p_V)
+            ! p_v = velocity at middle point position
+            ! -- Push particles --
+            call AC_get_p_pos_adim(p_pos_adim, p_V, dt, mesh_sc%dx(line_dir), mesh_sc%N_proc(line_dir))
+            ! Now p_pos = particle position and p_V = particle velocity
 
             ! ===== Remeshing =====
-            call advec_remesh(direction, ind_group, gsY, p_pos_adim, p_V, i, k, scal3D, dt)
+            call advec_remesh(line_dir, ind_group, gs, p_pos_adim, p_V, i,j,scal3D, dt)
 
         end do
     end do
 
-    deallocate(send_group_min)
-    deallocate(send_group_max)
+end subroutine advec_1D_basic
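+
+! Typical use (cf. advec_step_Torder1 above): select the direction first, eg
+!     call advec_setup_alongY()
+!     call advec_1D_basic(dt, gsY, Vy, scal)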
 
-end subroutine advecY_calc
 
-
-!> Scalar advection alongZ (this procedure call the right solver, depending on the simulation setup)
+!> Scalar advection along one direction (this procedure calls the right solver, depending on the simulation setup)
 !!    @param[in]        dt          = time step
-!!    @param[in]        Vz          = velocity along y (could be discretised on a bigger mesh then the scalar)
+!!    @param[in]        gs          = size of the work item along transverse direction
+!!    @param[in]        V_coarse    = velocity component on the coarse grid
+!!    @param[in]        V_fine      = velocity component on the fine grid
 !!    @param[in,out]    scal3D      = scalar field to advect
-subroutine advecZ_calc(dt, Vz, scal3D)
+subroutine advec_1D_Vcoarse(dt, gs, V_coarse, V_fine, scal3D)
 
+    use advecX, only : advecX_init_group    ! procedure devoted to advection along X
+    use advecY, only : advecY_init_group    ! procedure devoted to advection along Y
+    use advecZ, only : advecZ_init_group    ! procedure devoted to advection along Z
     use advec_variables ! contains info about solver parameters and others.
     use cart_topology   ! Description of mesh and of mpi topology
-    use advec_common    ! some procedures common to advection along all directions
+    use advec_common    ! some procedures common to advection along all directions
 
     ! Input/Output
-    real(WP), intent(in)                                                :: dt
-    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vz
-    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal3D
+    real(WP), intent(in)                          :: dt
+    integer, dimension(2), intent(in)             :: gs
+    real(WP), dimension(:,:,:), intent(in)        :: V_coarse
+    real(WP), dimension(:,:,:), intent(in)        :: V_fine
+    real(WP), dimension(:,:,:), intent(inout)     :: scal3D
     ! Other local variables
-    integer, parameter                                  :: direction =3 ! current direction
-    integer                                             :: i,j          ! indice of the currend mesh point
-    integer, dimension(2)                               :: ind_group    ! indice of the currend group of line (=(i,k) by default)
-    real(WP), dimension(N_proc(direction),gsZ(1),gsZ(2))  :: p_pos_adim ! adimensionned particles position
-    real(WP), dimension(N_proc(direction),gsZ(1),gsZ(2))  :: p_V        ! particles velocity
-
-    ! Allocate send_ind_min/max
-    if(allocated(send_group_min)) deallocate(send_group_min)
-    allocate(send_group_min(group_size(direction,1),group_size(direction,2)))
-    if(allocated(send_group_max)) deallocate(send_group_max)
-    allocate(send_group_max(group_size(direction,1),group_size(direction,2)))
-
-    ! Initialise the pointer for optimized remeshing
-    call AC_setup_alongZ()
+    integer                                       :: i,j          ! index of the current mesh point
+    integer, dimension(2)                         :: ind_group    ! index of the current group of lines (=(i,j) by default)
+    real(WP), dimension(mesh_sc%N_proc(line_dir),gs(1),gs(2))  :: p_pos_adim ! dimensionless particle positions
+    real(WP), dimension(mesh_sc%N_proc(line_dir)+1,gs(1),gs(2)):: p_V        ! particle velocities
 
     ind_group = 0
 
-    do j = 1, N_proc(2), gsZ(2)
+    do j = 1, mesh_sc%N_proc(gp_dir2), gs(2)
         ind_group(2) = ind_group(2) + 1
         ind_group(1) = 0
-        do i = 1, N_proc(1), gsZ(1)
+        do i = 1, mesh_sc%N_proc(gp_dir1), gs(1)
             ind_group(1) = ind_group(1) + 1
 
             ! ===== Init particles =====
-            call advecZ_init_group(Vz, i, j, gsZ, p_pos_adim, p_V)
+            call AC_get_p_pos_adim(p_V, V_fine, 0.5_WP*dt, &
+                  & mesh_sc%dx(line_dir), mesh_V%dx(line_dir), mesh_sc%N_proc(line_dir), i, j)
+            ! p_V now holds the predicted mid-point positions for the RK2 scheme
 
             ! ===== Advection =====
             ! -- Compute velocity (with a RK2 scheme) --
-            call AC_velocity_interpol_group(dt, direction, gsZ, ind_group, p_pos_adim, p_V)
-            ! -- Advec particles --
-            p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+            call AC_interpol_plus(line_dir, gs, ind_group, i, j, V_coarse, p_V)
+            ! p_V = velocity at the mid-point position
+            ! -- Push particles --
+            call AC_get_p_pos_adim(p_pos_adim, p_V, dt, mesh_sc%dx(line_dir), mesh_sc%N_proc(line_dir))
+            ! Now p_pos = particle position and p_V = particle velocity
 
             ! ===== Remeshing =====
-            call advec_remesh(direction, ind_group, gsZ, p_pos_adim, p_V, i,j,scal3D, dt)
+            call advec_remesh(line_dir, ind_group, gs, p_pos_adim, p_V, i,j,scal3D, dt)
 
         end do
     end do
 
-    deallocate(send_group_min)
-    deallocate(send_group_max)
-
-end subroutine advecZ_calc
-
+end subroutine advec_1D_Vcoarse
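
advec_1D_Vcoarse differs from the basic variant in that the velocity lives on its own, coarser grid (mesh_V) while the scalar lives on mesh_sc, so the mid-point velocity has to be interpolated between coarse nodes (AC_interpol_plus above). A sketch of that two-grid evaluation, under the assumptions that the coarse step is an integer multiple of the fine one and that the domain is periodic; the function name and the position-mapping formula are illustrative, not taken from the library:

    function velo_at_fine_point(v_coarse, x_fine, ratio) result(v)
        implicit none
        integer, parameter   :: WP = kind(1.0d0)
        real(WP), intent(in) :: v_coarse(:)  ! velocity on its own, coarser grid
        real(WP), intent(in) :: x_fine       ! position in fine (scalar) grid units
        integer,  intent(in) :: ratio        ! dx_coarse / dx_fine, assumed integer
        real(WP) :: v, xc, w
        integer  :: i0, i1, Nc
        Nc = size(v_coarse)
        xc = (x_fine - 1.0_WP)/ratio + 1.0_WP   ! same point in coarse-grid units
        i0 = floor(xc)
        w  = xc - i0
        i1 = modulo(i0,   Nc) + 1               ! periodic wrap
        i0 = modulo(i0-1, Nc) + 1
        v  = (1.0_WP - w)*v_coarse(i0) + w*v_coarse(i1)
    end function velo_at_fine_point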
 
 
 !> ===== Private procedure =====
diff --git a/HySoP/src/scalesInterface/particles/advecX.f90 b/HySoP/src/scalesInterface/particles/advecX.f90
index 7321d0365..65c31ea99 100644
--- a/HySoP/src/scalesInterface/particles/advecX.f90
+++ b/HySoP/src/scalesInterface/particles/advecX.f90
@@ -1,3 +1,4 @@
+!USEFORTEST advec
 !> @addtogroup part
 
 !------------------------------------------------------------------------------
@@ -80,11 +81,19 @@ subroutine advecX_remesh_init()
     use advec_variables         ! solver context
 
     select case(trim(type_solv))
+    case ('p_L4')
+        advecX_remesh_no_com => advecX_remesh_no_type_no_com
     case ('p_M8')
         advecX_remesh_no_com => advecX_remesh_no_type_no_com
     case ('p_M6')
         !advecX_remesh_com => advecX_remesh_in_buffer_Mprime6
         advecX_remesh_no_com => advecX_remesh_no_type_no_com
+    case ('p_M4')
+        !advecX_remesh_com => advecX_remesh_in_buffer_Mprime6
+        advecX_remesh_no_com => advecX_remesh_no_type_no_com
+    case ('d_M4')
+        !advecX_remesh_com => advecX_remesh_in_buffer_Mprime6
+        advecX_remesh_no_com => advecX_remesh_no_type_no_com
     case ('p_L2')
         !advecX_remesh_com => advecX_remesh_in_buffer_limited
         advecX_remesh_no_com => advecX_remesh_limited_no_com
@@ -96,22 +105,18 @@ subroutine advecX_remesh_init()
 end subroutine advecX_remesh_init
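
advecX_remesh_init above binds the remeshing routine once, at initialisation, through a procedure pointer keyed on the solver name; the per-particle loops then call through the pointer without re-testing the solver type. A stripped-down sketch of the pattern (the solver names mirror the patch, the remeshing body is a stub):

    module remesh_dispatch_sketch
        implicit none
        abstract interface
            subroutine remesh_iface(scal)
                real(kind(1.0d0)), intent(inout) :: scal(:)
            end subroutine remesh_iface
        end interface
        procedure(remesh_iface), pointer :: remesh_no_com => null()
    contains
        subroutine remesh_init(type_solv)
            character(len=*), intent(in) :: type_solv
            select case(trim(type_solv))
            case ('p_L4', 'p_M8', 'p_M6', 'p_M4', 'd_M4')
                remesh_no_com => remesh_no_type   ! untyped kernels share one path
            case default
                remesh_no_com => remesh_no_type
            end select
        end subroutine remesh_init
        subroutine remesh_no_type(scal)
            real(kind(1.0d0)), intent(inout) :: scal(:)
            scal(1) = scal(1)                     ! stub: a real kernel remeshes here
        end subroutine remesh_no_type
    end module remesh_dispatch_sketch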
 
 
-!>Remesh particle inside a buffer. Use corrected lambda remeshing polynoms.
-!!@autor Jean-Baptiste Lagaert
-!!  @param[in]      gs              = size of group of line along the current direction
-!!  @param[in]      i,j             = X- and Y-coordinates of the first line along X inside the current group of lines.
-!!  @param[in]      ind_min         = indices from the original array "pos_in_buffer" does not start from 1.
+!> Remesh particles inside a buffer. Use corrected lambda remeshing polynomials.
+!! @author Jean-Baptiste Lagaert
+!!    @param[in]        gs          = size of the group of lines along the current direction
+!!    @param[in]        j,k         = Y- and Z-coordinates of the first line along X inside the current group of lines.
+!!    @param[in]        ind_min     = indices from the original array "pos_in_buffer" does not start from 1.
 !!                                    It actually start from ind_min and to avoid access out of range,
 !!                                    a gap of (-ind_min) will be added to each indices from "pos_in_buffer.
-!!  @param[in]      p_pos_adim      = adimensionned  particles position
-!!  @param[in]      bl_type         = table of blocks type (center of left)
-!!  @param[in]      bl_tag          = inform about tagged particles (bl_tag(ind_bl)=1 if the end of the bl_ind-th block
-!!                                    and the begining of the following one is tagged)
-!!  @param[in]      send_min        = minimal indice of mesh involved in remeshing particles (of the particles in my local subdomains)
-!!  @param[in]      send_max        = maximal indice of mesh involved in remeshing particles (of the particles in my local subdomains)
-!!  @param[in]      scalar          = the initial scalar field transported by particles
-!!  @param[out]     buffer          = buffer where particles are remeshed
-!!  @param[in,out]  pos_in_buffer   = information about where remesing the particle inside the buffer
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        bl_type     = table of block types (center or left)
+!!    @param[in]        bl_tag      = information about tagged particles (bl_tag(ind_bl)=1 if the end of the ind_bl-th
+!!                                    block and the beginning of the following one are tagged)
+!!    @param[in]        send_min    = minimal mesh index involved in remeshing particles (of the particles in my local subdomain)
+!!    @param[in]        send_max    = maximal mesh index involved in remeshing particles (of the particles in my local subdomain)
+!!    @param[in]        scalar      = the initial scalar field transported by particles
+!!    @param[out]       buffer      = buffer where particles are remeshed
+!!    @param[in,out]    pos_in_buffer = information about where to remesh the particles inside the buffer
 subroutine advecX_remesh_in_buffer_lambda(gs, j, k, ind_min, p_pos_adim, bl_type, bl_tag, send_min, send_max, &
         & scalar, buffer, pos_in_buffer)
 
@@ -135,7 +140,6 @@ subroutine advecX_remesh_in_buffer_lambda(gs, j, k, ind_min, p_pos_adim, bl_type
                                                                         ! in part corresponding to different processes
 
     ! Other local variables
-    integer, dimension(2)                   :: send_range_all ! maximal (among the lines group) distance between me and processus to wich I send information
     integer                                 :: proc_gap     ! distance between my (mpi) coordonate and coordinate of the
     type(real_pter),dimension(:),allocatable:: remeshX_pter  ! pointer to send buffer in which scalar are sorted by line indice.
                                                             ! sorted by receivers
@@ -143,26 +147,26 @@ subroutine advecX_remesh_in_buffer_lambda(gs, j, k, ind_min, p_pos_adim, bl_type
     integer                                 :: ind          ! indice of the current particle inside the current line.
 
     ! ===== Remeshing into the buffer by using pointer array =====
-    ! -- Allocate remeshX_pter --
-    send_range_all(1) = minval(send_min)
-    send_range_all(2) = maxval(send_max)
-    allocate(remeshX_pter(send_range_all(1):send_range_all(2)))
     do i2 = 1, gs(2)
         do i1 = 1, gs(1)
+            send_j_min = send_min(i1,i2)
+            send_j_max = send_max(i1,i2)
 
-            do ind = send_min(i1,i2), send_max(i1,i2)
-                proc_gap = floor(real(ind-1)/N_proc(direction)) - (ind_min-1)
+            ! -- Allocate remeshX_pter --
+            allocate(remeshX_pter(send_j_min:send_j_max))
+            do ind = send_j_min, send_j_max
+                proc_gap = floor(real(ind-1)/mesh_sc%N_proc(direction)) - (ind_min-1)
                 remeshX_pter(ind)%pter => buffer(pos_in_buffer(proc_gap))
                 pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
             end do
 
             ! -- Remesh the particles in the buffer --
             call AC_remesh_lambda_pter(direction, p_pos_adim(:,i1,i2), scalar(:,j+i1-1,k+i2-1), &
-                & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_range_all(1), remeshX_pter)
+                & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, remeshX_pter)
 
+            deallocate(remeshX_pter)
         end do
     end do
-    deallocate(remeshX_pter)
 
     ! Scalar must be re-init before ending the remeshing
     scalar(:,j:j+gs(1)-1,k:k+gs(2)-1) = 0
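
The rewritten loop above sizes remeshX_pter on each line's own send range instead of the group-wide envelope (the removed send_range_all), so a line with a short range no longer reserves buffer slots it never touches, at the cost of one allocate/deallocate pair per line. The proc_gap expression maps a global mesh index to the buffer slot of the process that owns it; a toy check of that mapping (all constants illustrative):

    program proc_gap_demo
        implicit none
        integer, parameter :: N_proc = 32, ind_min = -1
        integer :: ind, proc_gap
        do ind = -10, 70, 20
            ! process owning mesh point "ind", relative to me, shifted by ind_min
            proc_gap = floor(real(ind-1)/N_proc) - (ind_min-1)
            print '(a,i4,a,i3)', 'mesh index ', ind, ' -> slot for gap ', proc_gap
        end do
    end program proc_gap_demo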
@@ -226,7 +230,7 @@ subroutine advecX_remesh_in_buffer_limit_lambda(gs, j, k, ind_min, p_pos_adim, b
             ! -- Allocate remeshX_pter --
             allocate(remeshX_pter(send_j_min:send_j_max))
             do ind = send_j_min, send_j_max
-                proc_gap = floor(real(ind-1)/N_proc(direction)) - (ind_min-1)
+                proc_gap = floor(real(ind-1)/mesh_sc%N_proc(direction)) - (ind_min-1)
                 remeshX_pter(ind)%pter => buffer(pos_in_buffer(proc_gap))
                 pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
             end do
@@ -282,7 +286,7 @@ subroutine advecX_remesh_in_buffer_Mprime(gs, j, k, ind_min, p_pos_adim, send_mi
                                                             ! sorted by receivers
     integer                                 :: i1, i2       ! indice of a line into the group
     integer                                 :: ind          ! indice of the current particle inside the current line.
-    real(WP), dimension(N_proc(direction))  :: pos_translat ! translation of p_pos_adim as array indice
+    real(WP), dimension(mesh_sc%N_proc(direction))  :: pos_translat ! translation of p_pos_adim so that array indices
                                                             ! are now starting from 1 and not ind_min
 
 
@@ -293,7 +297,7 @@ subroutine advecX_remesh_in_buffer_Mprime(gs, j, k, ind_min, p_pos_adim, send_mi
             ! -- Allocate remeshX_pter --
             allocate(remeshX_pter(send_min(i1,i2):send_max(i1,i2)))
             do ind = send_min(i1,i2), send_max(i1,i2)
-                proc_gap = floor(real(ind-1)/N_proc(direction)) - (ind_min-1)
+                proc_gap = floor(real(ind-1)/mesh_sc%N_proc(direction)) - (ind_min-1)
                 remeshX_pter(ind)%pter => buffer(pos_in_buffer(proc_gap))
                 pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
             end do
@@ -301,7 +305,7 @@ subroutine advecX_remesh_in_buffer_Mprime(gs, j, k, ind_min, p_pos_adim, send_mi
             pos_translat = p_pos_adim(:,i1,i2) - send_min(i1,i2) + 1
 
             ! -- Remesh the particles in the buffer --
-            do ind = 1, N_proc(direction)
+            do ind = 1, mesh_sc%N_proc(direction)
                 call AC_remesh_Mprime_pter(pos_translat(ind), scalar(ind,j+i1-1,k+i2-1), remeshX_pter)
             end do
 
@@ -337,7 +341,7 @@ subroutine advecX_remesh_type_no_com(ind_group, gs, p_pos_adim, p_V, j, k , scal
     integer, intent(in)                         :: j, k
     real(WP),dimension(:,:,:),intent(inout)     :: scal
     real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! adimensionned particles position
-    real(WP), dimension(:,:,:), intent(in)      :: p_V          ! particles velocity
+    real(WP), dimension(:,:,:), intent(inout)   :: p_V          ! particle velocities (also reused as remeshing buffer)
     real(WP), intent(in)                        :: dt
 
     ! Other local variables
@@ -345,8 +349,9 @@ subroutine advecX_remesh_type_no_com(ind_group, gs, p_pos_adim, p_V, j, k , scal
     ! Type and block
     logical, dimension(bl_nb(direction)+1,gs(1),gs(2))  :: bl_type      ! is the particle block a center block or a left one ?
     logical, dimension(bl_nb(direction),gs(1),gs(2))    :: bl_tag       ! indice of tagged particles
-    ! Variable used to remesh particles in a buffer
-    real(WP),dimension(N(direction))                               :: remesh_buffer! buffer use to remesh the scalar
+    integer                                 :: N_loc        ! local number of scalar mesh points along the current direction
+
+    N_loc = size(scal,1)
 
     ! ===== Pre-Remeshing: Determine blocks type and tag particles =====
     call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
@@ -357,14 +362,17 @@ subroutine advecX_remesh_type_no_com(ind_group, gs, p_pos_adim, p_V, j, k , scal
         do i2 = 1, gs(2)
 
             ! -- [re-] init buffer --
-            remesh_buffer = 0
+            !remesh_buffer = 0
+            p_V(:,1,1) = 0
 
             ! -- Remesh the particles in the buffer --
             call AC_remesh_lambda_array(direction, p_pos_adim(:,i1,i2), &
-                & scal(:,j+i1-1,k+i2-1), bl_type(:,i1,i2), bl_tag(:,i1,i2), remesh_buffer)
+                & scal(:,j+i1-1,k+i2-1), bl_type(:,i1,i2), bl_tag(:,i1,i2), p_V(:,1,1))
+                !& scal(:,j+i1-1,k+i2-1), bl_type(:,i1,i2), bl_tag(:,i1,i2), remesh_buffer)
 
             ! -- Update scalar from buffer --
-            scal(:,i1+j-1,k+i2-1) = remesh_buffer
+            !scal(:,i1+j-1,k+i2-1) = remesh_buffer
+            scal(:,i1+j-1,k+i2-1) = p_V(1:N_loc,1,1)
         end do
     end do
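
The change above drops the automatic array remesh_buffer and reuses the first line of p_V as scratch storage, which is why p_V became intent(inout): it trades one stack-allocated buffer per call against clobbering p_V, legal here because the velocity is no longer needed once the block types have been computed. A sketch of the reuse pattern (names illustrative; work is assumed at least as long as scal):

    subroutine remesh_line_reusing_scratch(scal, work)
        implicit none
        integer, parameter      :: WP = kind(1.0d0)
        real(WP), intent(inout) :: scal(:)   ! scalar values along one line
        real(WP), intent(inout) :: work(:)   ! e.g. p_V(:,1,1), borrowed as buffer
        integer :: ind
        work = 0.0_WP                        ! re-init the borrowed buffer
        do ind = 1, size(scal)
            ! a real solver would spread scal(ind) over several points of work
            work(ind) = work(ind) + scal(ind)
        end do
        scal = work(1:size(scal))            ! copy the remeshed line back
    end subroutine remesh_line_reusing_scratch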
 
@@ -393,7 +401,7 @@ subroutine advecX_remesh_limited_no_com(ind_group, gs, p_pos_adim, p_V, j, k , s
     integer, intent(in)                         :: j, k
     real(WP),dimension(:,:,:),intent(inout)     :: scal
     real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! adimensionned particles position
-    real(WP), dimension(:,:,:), intent(in)      :: p_V          ! particles velocity
+    real(WP), dimension(:,:,:), intent(inout)   :: p_V          ! particle velocities (also reused as remeshing buffer)
     real(WP), intent(in)                        :: dt
 
     ! Other local variables
@@ -401,9 +409,12 @@ subroutine advecX_remesh_limited_no_com(ind_group, gs, p_pos_adim, p_V, j, k , s
     ! Type and block
     logical, dimension(bl_nb(direction)+1,gs(1),gs(2))  :: bl_type      ! is the particle block a center block or a left one ?
     logical, dimension(bl_nb(direction),gs(1),gs(2))    :: bl_tag       ! indice of tagged particles
-    real(WP), dimension(N_proc(direction)+1,gs(1),gs(2)):: limit        ! limitator function (divided by 8.)
+    real(WP), dimension(mesh_sc%N_proc(direction)+1,gs(1),gs(2)):: limit        ! limitator function (divided by 8.)
     ! Variable used to remesh particles in a buffer
-    real(WP),dimension(N(direction))                               :: remesh_buffer! buffer use to remesh the scalar
+    !real(WP),dimension(mesh_sc%N(direction))                               :: remesh_buffer! buffer use to remesh the scalar
+    integer                                 :: N_loc        ! local number of scalar mesh points along the current direction
+
+    N_loc = size(scal,1)
 
     ! ===== Pre-Remeshing I: Determine blocks type and tag particles =====
     call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
@@ -419,16 +430,21 @@ subroutine advecX_remesh_limited_no_com(ind_group, gs, p_pos_adim, p_V, j, k , s
         do i2 = 1, gs(2)
 
             ! -- [re-] init buffer --
-            remesh_buffer = 0
+            !remesh_buffer = 0
+            p_V(:,1,1) = 0
+            !p_V(:,i1,i2) = 0
 
             ! -- Remesh the particles in the buffer --
             call AC_remesh_lambda2limited_array(direction, p_pos_adim(:,i1,i2), &
-                & scal(:,j+i1-1,k+i2-1), bl_type(:,i1,i2), bl_tag(:,i1,i2), limit(:,i1,i2), remesh_buffer)
+                & scal(:,j+i1-1,k+i2-1), bl_type(:,i1,i2), bl_tag(:,i1,i2), limit(:,i1,i2), p_V(:,1,1))
+                !& scal(:,j+i1-1,k+i2-1), bl_type(:,i1,i2), bl_tag(:,i1,i2), limit(:,i1,i2), remesh_buffer)
 
             ! -- Update scalar from buffer --
-            scal(:,i1+j-1,k+i2-1) = remesh_buffer
+            !scal(:,i1+j-1,k+i2-1) = remesh_buffer
+            scal(:,i1+j-1,k+i2-1) = p_V(1:N_loc,1,1)
         end do
     end do
+    !scal(:,i1+j-1:i1+j-1+gs(1),k+i2-1:k+i2-1+gs(2)) = p_V
 
 end subroutine advecX_remesh_limited_no_com
 
@@ -455,28 +471,33 @@ subroutine advecX_remesh_no_type_no_com(ind_group, gs, p_pos_adim, p_V, j, k , s
     integer, intent(in)                         :: j, k
     real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! adimensionned particles position
     real(WP), dimension(:,:,:), intent(inout)   :: scalar       ! the initial scalar field transported by particles
-    real(WP), dimension(:,:,:), intent(in)      :: p_V          ! particles velocity
+    real(WP), dimension(:,:,:), intent(inout)   :: p_V          ! particle velocities (also reused as remeshing buffer)
     real(WP), intent(in)                        :: dt
 
     ! Other local variables
     integer                                 :: i1, i2       ! indice of a line into the group
     integer                                 :: ind          ! indice of the current particle inside the current line.
-    real(WP),dimension(N(direction))        :: remesh_buffer! buffer use to remesh the scalar
+    !real(WP),dimension(mesh_sc%N(direction))        :: remesh_buffer! buffer use to remesh the scalar
+    integer                                 :: N_loc        ! local number of scalar mesh points along the current direction
+
+    N_loc = size(scalar,1)
 
     ! ===== Remeshing into the buffer by using pointer array =====
     do i2 = 1, gs(2)
         do i1 = 1, gs(1)
 
             ! -- [re-] init buffer --
-            remesh_buffer = 0
+            p_V(:,1,1) = 0
 
             ! -- Remesh the particles in the buffer --
-            do ind = 1, N_proc(direction)
-                call AC_remesh_Mprime_array(direction, p_pos_adim(ind,i1,i2), scalar(ind,j+i1-1,k+i2-1), remesh_buffer)
+            do ind = 1, mesh_sc%N_proc(direction)
+                call AC_remesh_Mprime_array(direction, p_pos_adim(ind,i1,i2), scalar(ind,j+i1-1,k+i2-1), p_V(:,1,1))
+                !call AC_remesh_Mprime_array(direction, p_pos_adim(ind,i1,i2), scalar(ind,j+i1-1,k+i2-1), remesh_buffer))
             end do
 
             ! -- Update scalar from buffer --
-            scalar(:,j+i1-1,k+i2-1) = remesh_buffer
+            !scalar(:,j+i1-1,k+i2-1) = remesh_buffer
+            scalar(:,j+i1-1,k+i2-1) = p_V(1:N_loc,1,1)
 
         end do
     end do
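
AC_remesh_Mprime_array spreads each particle's scalar onto the neighbouring grid points with an M'-family kernel. For reference, a sketch of the classical M4' (Monaghan) kernel, the usual basis of 'p_M4'-type formulas; this is the textbook definition, not code extracted from this patch:

    pure function Mprime4(x) result(w)
        implicit none
        integer, parameter   :: WP = kind(1.0d0)
        real(WP), intent(in) :: x   ! distance to the grid point, in grid units
        real(WP) :: w, s
        s = abs(x)
        if (s < 1.0_WP) then
            w = 1.0_WP - 2.5_WP*s**2 + 1.5_WP*s**3
        else if (s < 2.0_WP) then
            w = 0.5_WP*(2.0_WP - s)**2 * (1.0_WP - s)   ! negative lobe for s > 1
        else
            w = 0.0_WP
        end if
    end function Mprime4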
@@ -510,7 +531,7 @@ subroutine advecX_remesh_buffer_to_scalar(gs, j, k, ind_proc, gap, begin_i1, car
     integer, dimension(:,:), intent(in)         :: cartography
     real(WP),dimension(:), intent(in)           :: buffer       ! buffer containing the data to redistribute into the local scalar field.
     real(WP), dimension(:,:,:), intent(inout)   :: scalar       ! the scalar field.
-    integer, intent(out)                        :: beg_buffer   ! first indice inside where the scalar values are stored into the buffer for the current sender processus.
+    integer, intent(inout)                      :: beg_buffer   ! first index at which the scalar values of the current sending process are stored in the buffer.
                                                                 ! To know where reading data into the buffer.
 
     ! Other local variables
@@ -554,25 +575,23 @@ end subroutine advecX_remesh_buffer_to_scalar
 !!    @param[in]    j           = Y-indice of the current line
 !!    @param[in]    k           = Z-indice of the current line
 !!    @param[in]    Gsize       = size of groups (along X direction)
-!!    @param[out]   p_pos_adim  = adimensioned particles postion
 !!    @param[out]   p_V         = particle velocity
-subroutine advecX_init_group(Vx, j, k, Gsize, p_pos_adim, p_V)
+subroutine advecX_init_group(Vx, j, k, Gsize, p_V)
 
     use cart_topology   ! Description of mesh and of mpi topology
 
     ! Input/Output
-    integer, intent(in)                                                     :: j,k
-    integer, dimension(2), intent(in)                                       :: Gsize
-    real(WP), dimension(N_proc(direction),Gsize(1),Gsize(2)),intent(out)    :: p_pos_adim, p_V
-    real(WP), dimension(:,:,:), intent(in)                                  :: Vx
+    integer, intent(in)                       :: j,k
+    integer, dimension(2), intent(in)         :: Gsize
+    real(WP), dimension(:,:,:),intent(out)    ::  p_V
+    real(WP), dimension(:,:,:), intent(in)    :: Vx
     ! Other local variables
-    integer                                     :: ind          ! indice
-    integer                                     :: j_gp, k_gp   ! Y and Z indice of the current line in the group
+    integer                                   :: ind          ! indice
+    integer                                   :: j_gp, k_gp   ! Y and Z indice of the current line in the group
 
     do k_gp = 1, Gsize(2)
         do j_gp = 1, Gsize(1)
-            do ind = 1, N_proc(direction)
-                p_pos_adim(ind, j_gp, k_gp) = ind
+            do ind = 1, mesh_sc%N_proc(direction)
                 p_V(ind, j_gp, k_gp)        = Vx(ind,j+j_gp-1,k+k_gp-1)
             end do
         end do
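
After this change advecX_init_group only gathers the velocity of each line in the group; the particle positions are no longer stored as the trivial sequence 1, 2, ..., N but regenerated on the fly by AC_get_p_pos_adim. A sketch of the velocity gather (shapes illustrative: one line of N points per member of a gs(1) x gs(2) group starting at (j,k)):

    subroutine init_group_velocity(Vx, j, k, gs, p_V)
        implicit none
        integer, parameter    :: WP = kind(1.0d0)
        real(WP), intent(in)  :: Vx(:,:,:)   ! velocity along X on the local grid
        integer,  intent(in)  :: j, k, gs(2) ! first line and group size
        real(WP), intent(out) :: p_V(:,:,:)  ! one velocity line per group member
        integer :: ind, j_gp, k_gp
        do k_gp = 1, gs(2)
            do j_gp = 1, gs(1)
                do ind = 1, size(p_V,1)
                    p_V(ind, j_gp, k_gp) = Vx(ind, j+j_gp-1, k+k_gp-1)
                end do
            end do
        end do
    end subroutine init_group_velocity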
@@ -625,7 +644,7 @@ subroutine advecX_limitator_group(gp_s, ind_group, j, k, p_pos, &
 
     ! Local variables
     real(WP),dimension(2,gp_s(1),gp_s(2))                       :: Sbuffer, Rbuffer ! buffer to exchange scalar or limitator at boundaries with neighbors.
-    real(WP),dimension(gp_s(1),gp_s(2),N_proc(direction)+1)     :: deltaS      ! first order scalar variation
+    real(WP),dimension(gp_s(1),gp_s(2),mesh_sc%N_proc(direction)+1)     :: deltaS      ! first order scalar variation
     integer                                                     :: ind          ! loop indice on particle indice
     integer                                                     :: send_request ! mpi status of nonblocking send
     integer                                                     :: rece_request ! mpi status of nonblocking receive
@@ -642,23 +661,23 @@ subroutine advecX_limitator_group(gp_s, ind_group, j, k, p_pos, &
     ! Receive ghost value, ie value from neighbors boundaries.
     tag_table = compute_tag(ind_group, tag_part_slope, direction)
     call mpi_Irecv(Rbuffer(1,1,1), com_size, MPI_DOUBLE_PRECISION, &
-            & neighbors(direction,2), tag_table(1), D_comm(direction), rece_request, ierr)
+            & neighbors(direction,1), tag_table(1), D_comm(direction), rece_request, ierr)
     ! Send ghost for the two first scalar values of each line
     Sbuffer = scalar(1:2,j:j+gp_s(1)-1,k:k+gp_s(2)-1)
     call mpi_ISsend(Sbuffer(1,1,1), com_size, MPI_DOUBLE_PRECISION, &
-            & neighbors(direction,1), tag_table(1), D_comm(direction), send_request, ierr)
+            & neighbors(direction,-1), tag_table(1), D_comm(direction), send_request, ierr)
 
     ! ===== Compute scalar variation =====
     ! -- For the "middle" block --
-    do ind = 1, N_proc(direction)-1
+    do ind = 1, mesh_sc%N_proc(direction)-1
         deltaS(:,:,ind) = scalar(ind+1,j:j+gp_s(1)-1,k:k+gp_s(2)-1) - scalar(ind,j:j+gp_s(1)-1,k:k+gp_s(2)-1)
     end do
     ! -- For the last elements  of each line --
     ! Check reception
     call mpi_wait(rece_request, rece_status, ierr)
     ! Compute delta
-    deltaS(:,:,N_proc(direction)) = Rbuffer(1,:,:) - scalar(ind,j:j+gp_s(1)-1,k:k+gp_s(2)-1)   ! scalar(N+1) - scalar(N)
-    deltaS(:,:,N_proc(direction)+1) = Rbuffer(2,:,:) - Rbuffer(1,:,:)   ! scalar(N+1) - scalar(N)
+    deltaS(:,:,mesh_sc%N_proc(direction)) = Rbuffer(1,:,:) - scalar(ind,j:j+gp_s(1)-1,k:k+gp_s(2)-1)   ! scalar(N+1) - scalar(N)
+    deltaS(:,:,mesh_sc%N_proc(direction)+1) = Rbuffer(2,:,:) - Rbuffer(1,:,:)   ! scalar(N+2) - scalar(N+1)
 
 
     ! ===== Compute slope and limitator =====
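
The limitator needs the two scalar values just past the local boundary, hence the nonblocking receive from one neighbour and the synchronous-mode send to the other in the hunk above; note the patch also moves from the neighbors(direction,1)/neighbors(direction,2) indexing to relative offsets neighbors(direction,-1)/neighbors(direction,1). A bare-bones sketch of such a ghost exchange, assuming next/prev come from something like MPI_Cart_shift on a periodic communicator:

    subroutine exchange_ghosts(comm, next, prev, sbuf, rbuf)
        use mpi
        implicit none
        integer, intent(in) :: comm, next, prev
        double precision, intent(in)  :: sbuf(2)   ! my first two scalar values
        double precision, intent(out) :: rbuf(2)   ! neighbour's first two values
        integer :: req_r, req_s, ierr
        integer :: stat(MPI_STATUS_SIZE)
        call MPI_Irecv(rbuf, 2, MPI_DOUBLE_PRECISION, next, 0, comm, req_r, ierr)
        call MPI_Issend(sbuf, 2, MPI_DOUBLE_PRECISION, prev, 0, comm, req_s, ierr)
        call MPI_Wait(req_r, stat, ierr)   ! ghost values usable from here on
        call MPI_Wait(req_s, stat, ierr)   ! complete the send as well
    end subroutine exchange_ghosts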
@@ -701,26 +720,26 @@ subroutine advecX_limitator_group_no_com(gp_s, j, k, p_pos, &
     real(WP), dimension(:,:,:), intent(out)                     :: limit        ! limitator function
 
     ! Local variables
-    real(WP),dimension(gp_s(1),gp_s(2),0:N_proc(direction)+1)   :: deltaS       ! first order scalar variation
+    real(WP),dimension(gp_s(1),gp_s(2),0:mesh_sc%N_proc(direction)+1)   :: deltaS       ! first order scalar variation
     integer                                                     :: ind          ! loop indice on particle indice
     real(WP),dimension(gp_s(1),gp_s(2))                         :: afl          ! = cfl - [cfl] where [] denotes the nearest int.
 
     ! ===== Compute scalar variation =====
     ! -- For the "middle" block --
-    do ind = 1, N_proc(direction)-1
+    do ind = 1, mesh_sc%N_proc(direction)-1
         deltaS(:,:,ind) = scalar(ind+1,j:j+gp_s(1)-1,k:k+gp_s(2)-1) - scalar(ind,j:j+gp_s(1)-1,k:k+gp_s(2)-1)
     end do
     ! -- For the first element of each line --
-    deltaS(:,:,0) = scalar(1,j:j+gp_s(1)-1,k:k+gp_s(2)-1) - scalar(N(direction),j:j+gp_s(1)-1,k:k+gp_s(2)-1)   ! scalar(1) - scalar(0)
+    deltaS(:,:,0) = scalar(1,j:j+gp_s(1)-1,k:k+gp_s(2)-1) - scalar(mesh_sc%N(direction),j:j+gp_s(1)-1,k:k+gp_s(2)-1)   ! scalar(1) - scalar(0)
     ! -- For the last element of each line --
-    deltaS(:,:,N(direction))    = deltaS(:,:,0)   ! scalar(N+1) - scalar(N)
-    deltaS(:,:,N(direction)+1)  = deltaS(:,:,1)   ! scalar(N+2) - scalar(N+1)
+    deltaS(:,:,mesh_sc%N(direction))    = deltaS(:,:,0)   ! scalar(N+1) - scalar(N)
+    deltaS(:,:,mesh_sc%N(direction)+1)  = deltaS(:,:,1)   ! scalar(N+2) - scalar(N+1)
 
 
     ! ===== Compute slope and limitator =====
     ! Note that limit = limitator function/divided by 8
     ! Van Leer limitator
-    do ind = 1, N_proc(direction)
+    do ind = 1, mesh_sc%N_proc(direction)
         where(deltaS(:,:,ind)/=0)
             afl = p_pos(ind,:,:)
             afl = afl - nint(afl)
@@ -738,7 +757,7 @@ subroutine advecX_limitator_group_no_com(gp_s, j, k, p_pos, &
             limit(ind+1,:,:) = 0.0_WP
         end where
     end do
-    limit(1,:,:) = limit(N_proc(direction)+1,:,:)
+    limit(1,:,:) = limit(mesh_sc%N_proc(direction)+1,:,:)
     ! Classical (corrected) lambda formula: limitator function = 1
     ! limit = 1._WP/8._WP
 
diff --git a/HySoP/src/scalesInterface/particles/advecY.f90 b/HySoP/src/scalesInterface/particles/advecY.f90
index 80b8e176c..1bd653921 100644
--- a/HySoP/src/scalesInterface/particles/advecY.f90
+++ b/HySoP/src/scalesInterface/particles/advecY.f90
@@ -1,3 +1,4 @@
+!USEFORTEST advec
 !> @addtogroup part
 !! @{
 
@@ -70,22 +71,18 @@ contains
 ! ====================    Remeshing tools ====================
 ! ============================================================
 
-!>Remesh particle inside a buffer. Use corrected lambda remeshing polynoms.
-!!@autor Jean-Baptiste Lagaert
-!!  @param[in]      gs              = size of group of line along the current direction
-!!  @param[in]      i,j             = X- and Y-coordinates of the first line along X inside the current group of lines.
-!!  @param[in]      ind_min         = indices from the original array "pos_in_buffer" does not start from 1.
+!> Remesh particles inside a buffer. Use corrected lambda remeshing polynomials.
+!! @author Jean-Baptiste Lagaert
+!!    @param[in]        gs          = size of the group of lines along the current direction
+!!    @param[in]        i,k         = X- and Z-coordinates of the first line along Y inside the current group of lines.
+!!    @param[in]        ind_min     = indices from the original array "pos_in_buffer" does not start from 1.
 !!                                    It actually start from ind_min and to avoid access out of range,
 !!                                    a gap of (-ind_min) will be added to each indices from "pos_in_buffer.
-!!  @param[in]      p_pos_adim      = adimensionned  particles position
-!!  @param[in]      bl_type         = table of blocks type (center of left)
-!!  @param[in]      bl_tag          = inform about tagged particles (bl_tag(ind_bl)=1 if the end of the bl_ind-th block
-!!                                    and the begining of the following one is tagged)
-!!  @param[in]      send_min        = minimal indice of mesh involved in remeshing particles (of the particles in my local subdomains)
-!!  @param[in]      send_max        = maximal indice of mesh involved in remeshing particles (of the particles in my local subdomains)
-!!  @param[in]      scalar          = the initial scalar field transported by particles
-!!  @param[out]     buffer          = buffer where particles are remeshed
-!!  @param[in,out]  pos_in_buffer   = information about where remesing the particle inside the buffer
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        bl_type     = table of block types (center or left)
+!!    @param[in]        bl_tag      = information about tagged particles (bl_tag(ind_bl)=1 if the end of the ind_bl-th
+!!                                    block and the beginning of the following one are tagged)
+!!    @param[in]        send_min    = minimal mesh index involved in remeshing particles (of the particles in my local subdomain)
+!!    @param[in]        send_max    = maximal mesh index involved in remeshing particles (of the particles in my local subdomain)
+!!    @param[in]        scalar      = the initial scalar field transported by particles
+!!    @param[out]       buffer      = buffer where particles are remeshed
+!!    @param[in,out]    pos_in_buffer = information about where to remesh the particles inside the buffer
 subroutine advecY_remesh_in_buffer_lambda(gs, i, k, ind_min, p_pos_adim, bl_type, bl_tag, send_min, send_max, &
         & scalar, buffer, pos_in_buffer)
 
@@ -110,33 +107,32 @@ subroutine advecY_remesh_in_buffer_lambda(gs, i, k, ind_min, p_pos_adim, bl_type
 
     ! Other local variables
     integer                                 :: proc_gap     ! distance between my (mpi) coordonate and coordinate of the
-    integer, dimension(2)                   :: send_range_all ! maximal (among the lines group) distance between me and processus to wich I send information
     type(real_pter),dimension(:),allocatable:: remeshY_pter ! pointer to send buffer in which scalar are sorted by line indice.
                                                             ! sorted by receivers
     integer                                 :: i1, i2       ! indice of a line into the group
     integer                                 :: ind          ! indice of the current particle inside the current line.
 
     ! ===== Remeshing into the buffer by using pointer array =====
-    ! -- Allocate remeshY_pter --
-    send_range_all(1) = minval(send_min)
-    send_range_all(2) = maxval(send_max)
-    allocate(remeshY_pter(send_range_all(1):send_range_all(2)))
     do i2 = 1, gs(2)
         do i1 = 1, gs(1)
+            send_j_min = send_min(i1,i2)
+            send_j_max = send_max(i1,i2)
 
-            do ind = send_min(i1,i2), send_max(i1,i2)
-                proc_gap = floor(real(ind-1)/N_proc(direction)) - (ind_min-1)
+            ! -- Allocate remeshY_pter --
+            allocate(remeshY_pter(send_j_min:send_j_max))
+            do ind = send_j_min, send_j_max
+                proc_gap = floor(real(ind-1)/mesh_sc%N_proc(direction)) - (ind_min-1)
                 remeshY_pter(ind)%pter => buffer(pos_in_buffer(proc_gap))
                 pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
             end do
 
             ! -- Remesh the particles in the buffer --
             call AC_remesh_lambda_pter(direction, p_pos_adim(:,i1,i2), scalar(i+i1-1,:,k+i2-1), &
-                & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_range_all(1), remeshY_pter)
+                & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, remeshY_pter)
 
+            deallocate(remeshY_pter)
         end do
     end do
-    deallocate(remeshY_pter)
 
     ! Now scalar is put in buffer. Therefore, scalar has to be
     ! re-init to 0 before starting to redistribute to the scalar.
@@ -201,7 +197,7 @@ subroutine advecY_remesh_in_buffer_limit_lambda(gs, i, k, ind_min, p_pos_adim, b
             ! -- Allocate remeshY_pter --
             allocate(remeshY_pter(send_j_min:send_j_max))
             do ind = send_j_min, send_j_max
-                proc_gap = floor(real(ind-1)/N_proc(direction)) - (ind_min-1)
+                proc_gap = floor(real(ind-1)/mesh_sc%N_proc(direction)) - (ind_min-1)
                 remeshY_pter(ind)%pter => buffer(pos_in_buffer(proc_gap))
                 pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
             end do
@@ -257,7 +253,7 @@ subroutine advecY_remesh_in_buffer_Mprime(gs, i, k, ind_min, p_pos_adim, send_mi
                                                             ! sorted by receivers
     integer                                 :: i1, i2       ! indice of a line into the group
     integer                                 :: ind          ! indice of the current particle inside the current line.
-    real(WP), dimension(N_proc(direction))  :: pos_translat ! translation of p_pos_adim as array indice
+    real(WP), dimension(mesh_sc%N_proc(direction))  :: pos_translat ! translation of p_pos_adim so that array indices
                                                             ! are now starting from 1 and not ind_min
 
 
@@ -268,7 +264,7 @@ subroutine advecY_remesh_in_buffer_Mprime(gs, i, k, ind_min, p_pos_adim, send_mi
             ! -- Allocate remeshX_pter --
             allocate(remeshY_pter(send_min(i1,i2):send_max(i1,i2)))
             do ind = send_min(i1,i2), send_max(i1,i2)
-                proc_gap = floor(real(ind-1)/N_proc(direction)) - (ind_min-1)
+                proc_gap = floor(real(ind-1)/mesh_sc%N_proc(direction)) - (ind_min-1)
                 remeshY_pter(ind)%pter => buffer(pos_in_buffer(proc_gap))
                 pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
             end do
@@ -276,7 +272,7 @@ subroutine advecY_remesh_in_buffer_Mprime(gs, i, k, ind_min, p_pos_adim, send_mi
             pos_translat = p_pos_adim(:,i1,i2) - send_min(i1,i2) + 1
 
             ! -- Remesh the particles in the buffer --
-            do ind = 1, N_proc(direction)
+            do ind = 1, mesh_sc%N_proc(direction)
                 call AC_remesh_Mprime_pter(pos_translat(ind), scalar(i+i1-1,ind,k+i2-1), remeshY_pter)
             end do
 
@@ -315,7 +311,7 @@ subroutine advecY_remesh_buffer_to_scalar(gs, i, k, ind_proc, gap, begin_i1, car
     integer, dimension(:,:), intent(in)         :: cartography
     real(WP),dimension(:), intent(in)           :: buffer       ! buffer containing the data to redistribute into the local scalar field.
     real(WP), dimension(:,:,:), intent(inout)   :: scalar       ! the scalar field.
-    integer, intent(out)                        :: beg_buffer   ! first indice inside where the scalar values are stored into the buffer for the current sender processus.
+    integer, intent(inout)                      :: beg_buffer   ! first index at which the scalar values of the current sending process are stored in the buffer.
                                                                 ! To know where reading data into the buffer.
 
     ! Other local variables
@@ -359,25 +355,23 @@ end subroutine advecY_remesh_buffer_to_scalar
 !!    @param[in]    i           = X-indice of the current line
 !!    @param[in]    k           = Z-indice of the current line
 !!    @param[in]    Gsize       = size of groups (along Y direction)
-!!    @param[out]   p_pos_adim  = adimensioned particles postion
 !!    @param[out]   p_V         = particle velocity
-subroutine advecY_init_group(Vy, i, k, Gsize, p_pos_adim, p_V)
+subroutine advecY_init_group(Vy, i, k, Gsize, p_V)
 
     use cart_topology   ! description of mesh and of mpi topology
 
     ! input/output
-    integer, intent(in)                                                     :: i,k
-    integer, dimension(2), intent(in)                                       :: Gsize
-    real(WP), dimension(N_proc(direction),Gsize(1),Gsize(2)),intent(out)    :: p_pos_adim, p_V
-    real(WP), dimension(:,:,:), intent(in)                                  :: Vy
+    integer, intent(in)                         :: i,k
+    integer, dimension(2), intent(in)           :: Gsize
+    real(WP), dimension(:,:,:),intent(out)      :: p_V
+    real(WP), dimension(:,:,:), intent(in)      :: Vy
     ! Other local variables
     integer                                     :: ind          ! indice
     integer                                     :: i_gp, k_gp   ! Y and Z indice of the current line in the group
 
     do k_gp = 1, Gsize(2)
         do i_gp = 1, Gsize(1)
-            do ind = 1, N_proc(direction)
-                p_pos_adim(ind, i_gp, k_gp) = ind
+            do ind = 1, mesh_sc%N_proc(direction)
                 p_V(ind, i_gp, k_gp)        = Vy(i+i_gp-1,ind,k+k_gp-1)
             end do
         end do
@@ -428,7 +422,7 @@ subroutine advecY_limitator_group(gp_s, ind_group, i, k, p_pos, &
 
     ! Local variables
     real(WP),dimension(gp_s(1),gp_s(2),2)                       :: Sbuffer, Rbuffer ! buffer to exchange scalar or limitator at boundaries with neighbors.
-    real(WP),dimension(gp_s(1),gp_s(2),N_proc(direction)+1)     :: deltaS       ! first order scalar variation
+    real(WP),dimension(gp_s(1),gp_s(2),mesh_sc%N_proc(direction)+1)     :: deltaS       ! first order scalar variation
     integer                                                     :: ind,i1,i2    ! loop indice
     integer                                                     :: send_request ! mpi status of nonblocking send
     integer                                                     :: rece_request ! mpi status of nonblocking receive
@@ -445,7 +439,7 @@ subroutine advecY_limitator_group(gp_s, ind_group, i, k, p_pos, &
     ! Receive ghost value, ie value from neighbors boundaries.
     tag_table = compute_tag(ind_group, tag_part_slope, direction)
     call mpi_Irecv(Rbuffer(1,1,1), com_size, MPI_DOUBLE_PRECISION, &
-            & neighbors(direction,2), tag_table(1), D_comm(direction), rece_request, ierr)
+            & neighbors(direction,1), tag_table(1), D_comm(direction), rece_request, ierr)
     ! Send ghost for the two first scalar values of each line
     do i1 = 1, gp_s(1)
         do i2 = 1, gp_s(2)
@@ -454,11 +448,11 @@ subroutine advecY_limitator_group(gp_s, ind_group, i, k, p_pos, &
         end do
     end do
     call mpi_ISsend(Sbuffer(1,1,1), com_size, MPI_DOUBLE_PRECISION, &
-            & neighbors(direction,1), tag_table(1), D_comm(direction), send_request, ierr)
+            & neighbors(direction,-1), tag_table(1), D_comm(direction), send_request, ierr)
 
     ! ===== Compute scalar variation =====
     ! -- For the "middle" block --
-    do ind = 1, N_proc(direction)-1
+    do ind = 1, mesh_sc%N_proc(direction)-1
         deltaS(:,:,ind) = scalar(i:i+gp_s(1)-1,ind+1,k:k+gp_s(2)-1) &
                         & - scalar(i:i+gp_s(1)-1,ind,k:k+gp_s(2)-1)
     end do
@@ -466,8 +460,8 @@ subroutine advecY_limitator_group(gp_s, ind_group, i, k, p_pos, &
     ! Check reception
     call mpi_wait(rece_request, rece_status, ierr)
     ! Compute delta
-    deltaS(:,:,N_proc(direction)) = Rbuffer(:,:,1) - scalar(i:i+gp_s(1)-1,ind,k:k+gp_s(2)-1)   ! scalar(N+1) - scalar(N)
-    deltaS(:,:,N_proc(direction)+1) = Rbuffer(:,:,2) - Rbuffer(:,:,1)   ! scalar(N+1) - scalar(N)
+    deltaS(:,:,mesh_sc%N_proc(direction)) = Rbuffer(:,:,1) - scalar(i:i+gp_s(1)-1,ind,k:k+gp_s(2)-1)   ! scalar(N+1) - scalar(N)
+    deltaS(:,:,mesh_sc%N_proc(direction)+1) = Rbuffer(:,:,2) - Rbuffer(:,:,1)   ! scalar(N+2) - scalar(N+1)
 
 
     ! ===== Compute limitator =====
diff --git a/HySoP/src/scalesInterface/particles/advecZ.f90 b/HySoP/src/scalesInterface/particles/advecZ.f90
index df5228183..898c4abea 100644
--- a/HySoP/src/scalesInterface/particles/advecZ.f90
+++ b/HySoP/src/scalesInterface/particles/advecZ.f90
@@ -1,3 +1,4 @@
+!USEFORTEST advec
 !> @addtogroup part
 !! @{
 
@@ -64,23 +65,22 @@ contains
 ! ====================    Remeshing tools         ====================
 ! ====================================================================
 
-!>Remesh particle inside a buffer. Use corrected lambda remeshing polynoms.
-!!@autor Jean-Baptiste Lagaert
-!!  @param[in]      gs              = size of group of line along the current direction
-!!  @param[in]      i,j             = X- and Y-coordinates of the first line along X inside the current group of lines.
-!!  @param[in]      ind_min         = indices from the original array "pos_in_buffer" does not start from 1.
+!> Remesh particles inside a buffer. Use corrected lambda remeshing polynomials.
+!! @author Jean-Baptiste Lagaert
+!!    @param[in]        gs          = size of the group of lines along the current direction
+!!    @param[in]        i,j         = X- and Y-coordinates of the first line along Z inside the current group of lines.
+!!    @param[in]        ind_min     = indices from the original array "pos_in_buffer" does not start from 1.
 !!                                    It actually start from ind_min and to avoid access out of range,
 !!                                    a gap of (-ind_min) will be added to each indices from "pos_in_buffer.
-!!  @param[in]      p_pos_adim      = adimensionned  particles position
-!!  @param[in]      bl_type         = table of blocks type (center of left)
-!!  @param[in]      bl_tag          = inform about tagged particles (bl_tag(ind_bl)=1 if the end of the bl_ind-th block
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        bl_type     = table of block types (center or left)
+!!    @param[in]        bl_tag      = information about tagged particles (bl_tag(ind_bl)=1 if the end of the ind_bl-th block
 !!                                    and the begining of the following one is tagged)
-!!  @param[in]      send_min        = minimal indice of mesh involved in remeshing particles (of the particles in my local subdomains)
-!!  @param[in]      send_max        = maximal indice of mesh involved in remeshing particles (of the particles in my local subdomains)
-!!  @param[in]      send_range_all  = maximal indice of mesh involved in remeshing particles (of the particles in my local subdomains)
-!!  @param[in]      scalar          = the initial scalar field transported by particles
-!!  @param[out]     buffer          = buffer where particles are remeshed
-!!  @param[in,out]  pos_in_buffer   = information about where remesing the particle inside the buffer
+!!    @param[in]        send_min    = minimal mesh index involved in remeshing particles (of the particles in my local subdomain)
+!!    @param[in]        send_max    = maximal mesh index involved in remeshing particles (of the particles in my local subdomain)
+!!    @param[in]        scalar      = the initial scalar field transported by particles
+!!    @param[out]       buffer      = buffer where particles are remeshed
+!!    @param[in,out]    pos_in_buffer   = information about where to remesh the particles inside the buffer
 subroutine advecZ_remesh_in_buffer_lambda(gs, i, j, ind_min, p_pos_adim, bl_type, bl_tag, send_min, send_max, &
         & scalar, buffer, pos_in_buffer)
 
@@ -105,33 +105,32 @@ subroutine advecZ_remesh_in_buffer_lambda(gs, i, j, ind_min, p_pos_adim, bl_type
 
     ! Other local variables
     integer                                 :: proc_gap     ! distance between my (mpi) coordonate and coordinate of the
-    integer, dimension(2)                   :: send_range_all ! maximal (among the lines group) distance between me and processus to wich I send information
     type(real_pter),dimension(:),allocatable:: remeshZ_pter  ! pointer to send buffer in which scalar are sorted by line indice.
                                                             ! sorted by receivers
     integer                                 :: i1, i2       ! indice of a line into the group
     integer                                 :: ind          ! indice of the current particle inside the current line.
 
     ! ===== Remeshing into the buffer by using pointer array =====
-    ! -- Allocate remeshX_pter --
-    send_range_all(1) = minval(send_min)
-    send_range_all(2) = maxval(send_max)
-    allocate(remeshZ_pter(send_range_all(1):send_range_all(2)))
     do i2 = 1, gs(2)
         do i1 = 1, gs(1)
+            send_j_min = send_min(i1,i2)
+            send_j_max = send_max(i1,i2)
 
-            do ind = send_min(i1,i2), send_max(i1,i2)
-                proc_gap = floor(real(ind-1)/N_proc(direction)) - (ind_min-1)
+            ! -- Allocate remeshZ_pter --
+            allocate(remeshZ_pter(send_j_min:send_j_max))
+            do ind = send_j_min, send_j_max
+                proc_gap = floor(real(ind-1)/mesh_sc%N_proc(direction)) - (ind_min-1)
                 remeshZ_pter(ind)%pter => buffer(pos_in_buffer(proc_gap))
                 pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
             end do
 
             ! -- Remesh the particles in the buffer --
             call AC_remesh_lambda_pter(direction, p_pos_adim(:,i1,i2), scalar(i+i1-1,j+i2-1,:), &
-                & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_range_all(1), remeshZ_pter)
+                & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, remeshZ_pter)
 
+            deallocate(remeshZ_pter)
         end do
     end do
-    deallocate(remeshZ_pter)
 
     ! Scalar must be re-init before ending the remeshing
     scalar(i:i+gs(1)-1,j:j+gs(2)-1,:) = 0
@@ -195,7 +194,7 @@ subroutine advecZ_remesh_in_buffer_limit_lambda(gs, i, j, ind_min, p_pos_adim, b
             ! -- Allocate remeshX_pter --
             allocate(remeshZ_pter(send_j_min:send_j_max))
             do ind = send_j_min, send_j_max
-                proc_gap = floor(real(ind-1)/N_proc(direction)) - (ind_min-1)
+                proc_gap = floor(real(ind-1)/mesh_sc%N_proc(direction)) - (ind_min-1)
                 remeshZ_pter(ind)%pter => buffer(pos_in_buffer(proc_gap))
                 pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
             end do
@@ -253,7 +252,7 @@ subroutine advecZ_remesh_in_buffer_Mprime(gs, i, j, ind_min, p_pos_adim, send_mi
                                                             ! sorted by receivers
     integer                                 :: i1, i2       ! indice of a line into the group
     integer                                 :: ind          ! indice of the current particle inside the current line.
-    real(WP), dimension(N_proc(direction))  :: pos_translat ! translation of p_pos_adim as array indice
+    real(WP), dimension(mesh_sc%N_proc(direction))  :: pos_translat ! translation of p_pos_adim so that array indices
                                                             ! are now starting from 1 and not ind_min
 
 
@@ -264,7 +263,7 @@ subroutine advecZ_remesh_in_buffer_Mprime(gs, i, j, ind_min, p_pos_adim, send_mi
             ! -- Allocate remeshZ_pter --
             allocate(remeshZ_pter(send_min(i1,i2):send_max(i1,i2)))
             do ind = send_min(i1,i2), send_max(i1,i2)
-                proc_gap = floor(real(ind-1)/N_proc(direction)) - (ind_min-1)
+                proc_gap = floor(real(ind-1)/mesh_sc%N_proc(direction)) - (ind_min-1)
                 remeshZ_pter(ind)%pter => buffer(pos_in_buffer(proc_gap))
                 pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
             end do
@@ -272,7 +271,7 @@ subroutine advecZ_remesh_in_buffer_Mprime(gs, i, j, ind_min, p_pos_adim, send_mi
             pos_translat = p_pos_adim(:,i1,i2) - send_min(i1,i2) + 1
 
             ! -- Remesh the particles in the buffer --
-            do ind = 1, N_proc(direction)
+            do ind = 1, mesh_sc%N_proc(direction)
                 call AC_remesh_Mprime_pter(pos_translat(ind), scalar(i+i1-1,j+i2-1,ind), remeshZ_pter)
             end do
 
@@ -311,7 +310,7 @@ subroutine advecZ_remesh_buffer_to_scalar(gs, i, j, ind_proc, gap, begin_i1, car
     integer, dimension(:,:), intent(in)         :: cartography
     real(WP),dimension(:), intent(in)           :: buffer       ! buffer containing the data to redistribute into the local scalar field.
     real(WP), dimension(:,:,:), intent(inout)   :: scalar       ! the scalar field.
-    integer, intent(out)                        :: beg_buffer   ! first indice inside where the scalar values are stored into the buffer for the current sender processus.
+    integer, intent(inout)                      :: beg_buffer   ! first index at which the scalar values of the current sending process are stored in the buffer.
                                                                 ! To know where reading data into the buffer.
 
     ! Other local variables
@@ -355,25 +354,23 @@ end subroutine advecZ_remesh_buffer_to_scalar
 !!    @param[in]    i           = X-indice of the current line
 !!    @param[in]    j           = Y-indice of the current line
 !!    @param[in]    Gsize       = size of groups (along Z direction)
-!!    @param[out]   p_pos_adim  = adimensioned particles postion
-!!    @param[out]   p_V         = particle velocity 
-subroutine advecZ_init_group(Vz, i, j, Gsize, p_pos_adim, p_V)
+!!    @param[out]   p_V         = particle velocity
+subroutine advecZ_init_group(Vz, i, j, Gsize, p_V)
 
     use cart_topology   ! Description of mesh and of mpi topology
 
     ! Input/Output
-    integer, intent(in)                                                     :: i,j
-    integer, dimension(2), intent(in)                                       :: Gsize
-    real(WP), dimension(N_proc(direction),Gsize(1),Gsize(2)),intent(out)    :: p_pos_adim, p_V
-    real(WP), dimension(:,:,:), intent(in)                                  :: Vz
+    integer, intent(in)                         :: i,j
+    integer, dimension(2), intent(in)           :: Gsize
+    real(WP), dimension(:,:,:),intent(out)      :: p_V
+    real(WP), dimension(:,:,:), intent(in)      :: Vz
     ! Other local variables
     integer                                     :: ind          ! indice
     integer                                     :: i_gp, j_gp   ! X and Y indice of the current line in the group
 
     do j_gp = 1, Gsize(2)
         do i_gp = 1, Gsize(1)
-            do ind = 1, N_proc(direction)
-                p_pos_adim(ind, i_gp, j_gp) = ind
+            do ind = 1, mesh_sc%N_proc(direction)
                 p_V(ind, i_gp, j_gp)        = Vz(i+(i_gp-1),j+(j_gp-1), ind)
             end do
         end do
@@ -425,7 +422,7 @@ subroutine advecZ_limitator_group(gp_s, ind_group, i, j, p_pos, &
 
     ! Local variables
     real(WP),dimension(gp_s(1),gp_s(2),2)                       :: Sbuffer, Rbuffer ! buffer to exchange scalar or limitator at boundaries with neighbors.
-    real(WP),dimension(gp_s(1),gp_s(2),N_proc(direction)+1)     :: deltaS       ! first order scalar variation
+    real(WP),dimension(gp_s(1),gp_s(2),mesh_sc%N_proc(direction)+1)     :: deltaS       ! first order scalar variation
     integer                                                     :: ind          ! loop indice on particle indice
     integer                                                     :: send_request ! mpi status of nonblocking send
     integer                                                     :: rece_request ! mpi status of nonblocking receive
@@ -442,15 +439,15 @@ subroutine advecZ_limitator_group(gp_s, ind_group, i, j, p_pos, &
     ! Receive ghost value, ie value from neighbors boundaries.
     tag_table = compute_tag(ind_group, tag_part_slope, direction)
     call mpi_Irecv(Rbuffer(1,1,1), com_size, MPI_DOUBLE_PRECISION, &
-            & neighbors(direction,2), tag_table(1), D_comm(direction), rece_request, ierr)
+            & neighbors(direction,1), tag_table(1), D_comm(direction), rece_request, ierr)
     ! Send ghost for the two first scalar values of each line
     Sbuffer = scalar(i:i+gp_s(1)-1,j:j+gp_s(2)-1,1:2)
     call mpi_ISsend(Sbuffer(1,1,1), com_size, MPI_DOUBLE_PRECISION, &
-            & neighbors(direction,1), tag_table(1), D_comm(direction), send_request, ierr)
+            & neighbors(direction,-1), tag_table(1), D_comm(direction), send_request, ierr)
 
     ! ===== Compute scalar variation =====
     ! -- For the "middle" block --
-    do ind = 1, N_proc(direction)-1
+    do ind = 1, mesh_sc%N_proc(direction)-1
         deltaS(:,:,ind) = scalar(i:i+gp_s(1)-1,j:j+gp_s(2)-1,ind+1) &
                         & - scalar(i:i+gp_s(1)-1,j:j+gp_s(2)-1,ind)
     end do
@@ -458,9 +455,9 @@ subroutine advecZ_limitator_group(gp_s, ind_group, i, j, p_pos, &
     ! Check reception
     call mpi_wait(rece_request, rece_status, ierr)
     ! Compute delta
-    deltaS(:,:,N_proc(direction)) = Rbuffer(:,:,1) &
+    deltaS(:,:,mesh_sc%N_proc(direction)) = Rbuffer(:,:,1) &
                                     & - scalar(i:i+gp_s(1)-1,j:j+gp_s(2)-1,ind)   ! scalar(N+1) - scalar(N)
-    deltaS(:,:,N_proc(direction)+1) = Rbuffer(:,:,2) - Rbuffer(:,:,1)   ! scalar(N+1) - scalar(N)
+    deltaS(:,:,mesh_sc%N_proc(direction)+1) = Rbuffer(:,:,2) - Rbuffer(:,:,1)   ! scalar(N+2) - scalar(N+1)
 
 
     ! ===== Compute slope and limitator =====
diff --git a/HySoP/src/scalesInterface/particles/advec_Vector.f90 b/HySoP/src/scalesInterface/particles/advec_Vector.f90
index 5cde64bc3..a6722a535 100644
--- a/HySoP/src/scalesInterface/particles/advec_Vector.f90
+++ b/HySoP/src/scalesInterface/particles/advec_Vector.f90
@@ -1,3 +1,4 @@
+!USEFORTEST advec
 !> @addtogroup part
 !! @{
 !------------------------------------------------------------------------------
@@ -22,6 +23,8 @@
 !------------------------------------------------------------------------------
 module advec_Vect
 
+    use advec, only : advec_init
+
     use precision_tools
     use advec_abstract_proc
     implicit none
@@ -31,8 +34,6 @@ module advec_Vect
     character(len=str_short), private   :: type_part_solv
     !> dimensionnal splitting (eg classical, Strang or particle)
     character(len=str_short), private   :: dim_splitting
-    !> Group size along current direction
-    integer, private, dimension(2)  :: gsX, gsY, gsZ
 
 
     ! ===== Public procedures =====
@@ -57,82 +58,23 @@ contains
 !!    @return type_part_solver      = numerical method used for advection
 function type_part_solver()
     character(len=str_short)    :: type_part_solver
-    
+
     type_part_solver = type_part_solv
 end function
 
 
-!> Initialise the particle advection methods
-!!    @param[in]    order       = to choose the remeshing method (and thus the order)
-!!    @param[out]   stab_coeff  = stability coefficient (condition stability is
-!!                                  dt< stab_coeff/norm_inf(V))
-!!    @param[in]    dim_split   = dimensionnal splitting (eg classical,
-!!                                    Strang splitting or particle splitting)
-!!    @param[in]    verbosity   = to display info about chosen remeshing formula (optional)
-subroutine advec_init_Vect(order, stab_coeff, verbosity, dim_split)
-
-    use advec_variables       ! contains info about solver parameters and others.
-    use cart_topology   ! Description of mesh and of mpi topology
-    use advecX          ! solver for advection along X
-    use advecY          ! solver for advection along Y
-    use advecZ          ! solver for advection along Z
-    use advec_common    ! some procedures common to advection along all directions
-
-    ! Input/Output
-    character(len=*), optional, intent(in)  ::  order, dim_split
-    logical, optional, intent(in)           ::  verbosity
-    real(WP), optional, intent(out)         ::  stab_coeff
-
-    ! Use default solver if it is not chosen by the user.
-    if(present(order)) then
-        type_part_solv = order
-    else
-        type_part_solv = 'p_O2'
-    end if
-
-    ! Initialize the solver
-    if (present(verbosity)) then
-        call AC_solver_init(type_part_solv, verbosity)
-    else
-        call AC_solver_init(type_part_solv)
-    end if
-
-    if (present(stab_coeff)) stab_coeff = 1.0/(dble(bl_size))
-
-    ! Call the right remeshing formula
-    select case(type_part_solv)
-        case('p_O2')
-            advec_remesh_bis => AC_remesh_lambda_group
-        case('p_O4')
-            advec_remesh_bis => AC_remesh_lambda_group
-        case('p_L2')
-            advec_remesh_bis => AC_remesh_limit_lambda_group
-        case('p_M6')
-            advec_remesh_bis => AC_remesh_Mprime_group
-        case('p_M8')
-            advec_remesh_bis => AC_remesh_Mprime_group
-        case default
-            advec_remesh_bis => AC_remesh_lambda_group
-    end select
-
-    call AC_setup_init()
-
-    ! Save group size
-    gsX =group_size(1,:)
-    gsY =group_size(2,:)
-    gsZ =group_size(3,:)
-
-end subroutine advec_init_Vect
-
-
 !> Solve advection equation - order 2 in time (order 2 dimensional splitting)
 !!    @param[in]        dt          = time step
 !!    @param[in]        Vx          = velocity along x (could be discretised on a bigger mesh then the scalar)
 !!    @param[in]        Vy          = velocity along y
 !!    @param[in]        Vz          = velocity along z
-!!    @param[in,out]    scal        = scalar field to advect
-subroutine advec_step_Vect(dt, Vx, Vy, Vz, scal_Vector)
+!!    @param[in,out]    VectX       = X component of vector to advect
+!!    @param[in,out]    VectY       = Y component of vector to advect
+!!    @param[in,out]    VectZ       = Z component of vector to advect
+subroutine advec_step_Vect(dt, Vx, Vy, Vz, vectX, vectY, vectZ)
 
+    use advec, only : advec_setup_alongX, advec_setup_alongY, &
+        & advec_setup_alongZ, gsX, gsY, gsZ
     use advecX          ! Method to advec along X
     use advecY          ! Method to advec along Y
     use advecZ          ! Method to advec along Z
@@ -140,202 +82,143 @@ subroutine advec_step_Vect(dt, Vx, Vy, Vz, scal_Vector)
     ! Input/Output
     real(WP), intent(in)                        :: dt
     real(WP), dimension(:,:,:), intent(in)      :: Vx, Vy, Vz
-    real(WP), dimension(:,:,:,:), intent(inout) :: scal_Vector
-
-    call advecX_calc_Vect(dt/2.0, Vx, scal_Vector)
-    call advecY_calc_Vect(dt/2.0, Vy, scal_Vector)
-    call advecZ_calc_Vect(dt/2.0, Vz, scal_Vector)
-    call advecZ_calc_Vect(dt/2.0, Vz, scal_Vector)
-    call advecY_calc_Vect(dt/2.0, Vy, scal_Vector)
-    call advecX_calc_Vect(dt/2.0, Vx, scal_Vector)
+    real(WP), dimension(:,:,:), intent(inout)   :: vectX, vectY, vectZ
+
+    call advec_setup_alongX()
+    call advec_vector_X_basic_no_com(dt/2.0, gsX, Vx, vectX, vectY, vectZ)
+    call advec_setup_alongY()
+    call advec_vector_1D_basic(dt/2.0, gsY, Vy, vectX, vectY, vectZ)
+    call advec_setup_alongZ()
+    call advec_vector_1D_basic(dt/2.0, gsZ, Vz, vectX, vectY, vectZ)
+    call advec_vector_1D_basic(dt/2.0, gsZ, Vz, vectX, vectY, vectZ)
+    call advec_setup_alongY()
+    call advec_vector_1D_basic(dt/2.0, gsY, Vy, vectX, vectY, vectZ)
+    call advec_setup_alongX()
+    call advec_vector_X_basic_no_com(dt/2.0, gsX, Vx, vectX, vectY, vectZ)
 
 end subroutine advec_step_Vect
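+
+! Remark: the sequence above is a symmetric (Strang) splitting,
+!   X(dt/2) Y(dt/2) Z(dt/2) Z(dt/2) Y(dt/2) X(dt/2),
+! which is second order in time as soon as each one-directional solver
+! (here RK2 advection + remeshing) is itself at least second order.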
 
 
-!> Scalar advection (this procedure call the right solver, depending on the simulation setup)
+!> Scalar advection along one direction (this procedure calls the right solver, depending on the simulation setup).
+!! Variant for advection of a 3D-vector.
 !!    @param[in]        dt          = time step
-!!    @param[in]        Vx          = velocity along X (could be discretised on a bigger mesh then the scalar)
-!!    @param[in,out]    scal_vect   = scalar field to advect
-subroutine advecX_calc_Vect(dt, Vx, scal_vect)
-
-    use advecX          ! Procedure specific to advection along X
-    use advec_common    ! Some procedures common to advection along all directions
+!!    @param[in]        gs          = size of the work item along the two transverse directions
+!!    @param[in]        V_comp      = velocity component
+!!    @param[in,out]    VectX       = X component of vector to advect
+!!    @param[in,out]    VectY       = Y component of vector to advect
+!!    @param[in,out]    VectZ       = Z component of vector to advect
+subroutine advec_vector_1D_basic(dt, gs, V_comp, vectX, vectY, vectZ)
+
+    use advec, only : advec_init_velo, advec_remesh, line_dir, gp_dir1, gp_dir2
+    use advecX, only : advecX_init_group    ! procedure devoted to advection along X
+    use advecY, only : advecY_init_group    ! procedure devoted to advection along Y
+    use advecZ, only : advecZ_init_group    ! procedure devoted to advection along Z
     use advec_variables ! contains info about solver parameters and others.
     use cart_topology   ! Description of mesh and of mpi topology
+    use advec_common    ! some procedures common to advection along all directions
 
     ! Input/Output
-    real(WP), intent(in)                                                :: dt
-    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vx
-    real(WP), dimension(:,:,:,:), intent(inout)                         :: scal_vect
+    real(WP), intent(in)                          :: dt
+    integer, dimension(2), intent(in)             :: gs
+    real(WP), dimension(:,:,:), intent(in)        :: V_comp
+    real(WP), dimension(:,:,:), intent(inout)     :: vectX, vectY, vectZ
     ! Other local variables
-    integer, parameter                                  :: direction =1 ! current direction
-    integer                                             :: j,k          ! indice of the currend mesh point
-    integer                                             :: sca          ! indice of the currend scalar field
-    integer, dimension(2)                               :: ind_group    ! indice of the currend group of line (=(i,k) by default)
-    real(WP),dimension(N_proc(direction),gsX(1),gsX(2)) :: p_pos_adim   ! adimensionned particles position
-    real(WP),dimension(N_proc(direction),gsX(1),gsX(2)) :: p_V          ! particles velocity
-
-    ! Allocate send_ind_min/max
-    if(allocated(send_group_min)) deallocate(send_group_min)
-    allocate(send_group_min(group_size(direction,1),group_size(direction,2)))
-    if(allocated(send_group_max)) deallocate(send_group_max)
-    allocate(send_group_max(group_size(direction,1),group_size(direction,2)))
-
-    ! Initialise the pointer for optimized remeshing
-    call AC_setup_alongX()
+    integer                                       :: i,j          ! indices of the current mesh point
+    integer, dimension(2)                         :: ind_group    ! index of the current group of lines
+    real(WP), dimension(mesh_sc%N_proc(line_dir),gs(1),gs(2))  :: p_pos_adim ! adimensionned particles position
+    real(WP), dimension(mesh_sc%N_proc(line_dir),gs(1),gs(2))  :: p_V        ! particles velocity
 
     ind_group = 0
 
-    do k = 1, N_proc(3), gsX(2)
+    do j = 1, mesh_sc%N_proc(gp_dir2), gs(2)
         ind_group(2) = ind_group(2) + 1
         ind_group(1) = 0
-        do j = 1, N_proc(2), gsX(1)
+        do i = 1, mesh_sc%N_proc(gp_dir1), gs(1)
             ind_group(1) = ind_group(1) + 1
 
             ! ===== Init particles =====
-            call advecX_init_group(Vx, j, k, gsX, p_pos_adim, p_V)
+            call advec_init_velo(V_comp, i, j, gs, p_pos_adim)
+            ! p_pos is used to store velocity at grid point
+            call AC_get_p_pos_adim(p_V, p_pos_adim, 0.5_WP*dt, mesh_sc%dx(line_dir), mesh_sc%N_proc(line_dir))
+            ! p_V = position at the middle point of the RK2 scheme
 
             ! ===== Advection =====
             ! -- Compute velocity (with a RK2 scheme) --
-            call AC_velocity_interpol_group(dt, direction, gsX, ind_group, p_pos_adim, p_V)
-
-            ! -- Advec particles --
-            p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+            ! Note that p_pos is used as velocity component storage
+            call AC_interpol_lin(line_dir, gs, ind_group, p_pos_adim, p_V)
+            ! p_V = velocity at the middle point position
+            ! -- Push particles --
+            call AC_get_p_pos_adim(p_pos_adim, p_V, dt, mesh_sc%dx(line_dir), mesh_sc%N_proc(line_dir))
+            ! Now p_pos = particle position and p_V = particle velocity
 
             ! ===== Remeshing =====
-            do sca = 1, size(scal_vect,4)
-                call advec_remesh_bis(direction, ind_group, gsX, &
-                    & p_pos_adim, p_V, j, k, scal_Vect(:,:,:,sca), dt)
-            end do
+            call advec_remesh(line_dir, ind_group, gs, p_pos_adim, p_V, i,j,vectX, dt)
+            call advec_remesh(line_dir, ind_group, gs, p_pos_adim, p_V, i,j,vectY, dt)
+            call advec_remesh(line_dir, ind_group, gs, p_pos_adim, p_V, i,j,vectZ, dt)
 
         end do
     end do
 
-end subroutine advecX_calc_Vect
-
+end subroutine advec_vector_1D_basic
 
-!> Scalar advection along Y (this procedure call the right solver, depending on the simulation setup)
+!> Scalar advection along one direction - variant for cases with no communication
 !!    @param[in]        dt          = time step
-!!    @param[in]        Vy          = velocity along y (could be discretised on a bigger mesh then the scalar)
+!!    @param[in]        V_comp      = velocity along X (could be discretised on a bigger mesh than the scalar)
 !!    @param[in,out]    scal3D      = scalar field to advect
-subroutine advecY_calc_Vect(dt, Vy, scal_vect)
+!! @details
+!!   Works only for direction = X. A basic (and very simple) remeshing would
+!! just have to be added for the other directions.
+subroutine advec_vector_X_basic_no_com(dt, gs, V_comp, vectX, vectY, vectZ)
 
-    use advecY          ! Procedure specific to advection along Y
+    use advec, only : advec_init_velo, advec_remesh, line_dir, gp_dir1, gp_dir2
+    use advecX          ! Procedure specific to advection along X
     use advec_common    ! Some procedures common to advection along all directions
     use advec_variables ! contains info about solver parameters and others.
     use cart_topology   ! Description of mesh and of mpi topology
 
     ! Input/Output
-    real(WP), intent(in)                                                :: dt
-    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vy
-    real(WP), dimension(:,:,:,:), intent(inout)                         :: scal_vect
+    real(WP), intent(in)                          :: dt
+    integer, dimension(2), intent(in)             :: gs
+    real(WP), dimension(:,:,:), intent(in)        :: V_comp
+    real(WP), dimension(:,:,:), intent(inout)     :: vectX, vectY, vectZ
     ! Other local variables
-    integer, parameter                                  :: direction =2 ! current direction
-    integer                                             :: i,k          ! indice of the currend mesh point
-    integer                                             :: sca          ! indice of the currend scalar field
-    integer, dimension(2)                               :: ind_group    ! indice of the currend group of line (=(i,k) by default)
-    real(WP),dimension(N_proc(direction),gsY(1),gsY(2)) :: p_pos_adim   ! adimensionned particles position
-    real(WP),dimension(N_proc(direction),gsY(1),gsY(2)) :: p_V          ! particles velocity
-
-    ! Allocate send_ind_min/max
-    if(allocated(send_group_min)) deallocate(send_group_min)
-    allocate(send_group_min(group_size(direction,1),group_size(direction,2)))
-    if(allocated(send_group_max)) deallocate(send_group_max)
-    allocate(send_group_max(group_size(direction,1),group_size(direction,2)))
-
-    ! Initialise the pointer for optimized remeshing
-    call AC_setup_alongY()
-
-    ind_group = 0
-
-    do k = 1, N_proc(3), gsY(2)
-        ind_group(2) = ind_group(2) + 1
-        ind_group(1) = 0
-        do i = 1, N_proc(1), gsY(1)
-            ind_group(1) = ind_group(1) + 1
-
-            ! ===== Init particles =====
-            call advecY_init(Vy, i, k, gsY, p_pos_adim, p_V)
-
-            ! ===== Advection =====
-            ! -- Compute velocity (with a RK2 scheme) --
-            call AC_velocity_interpol_group(dt, direction, gsY, ind_group, p_pos_adim, p_V)
-
-            ! -- Advec particles --
-            p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
-
-            ! ===== Remeshing =====
-            do sca = 1, size(scal_vect,4)
-                call advec_remesh_bis(direction, ind_group, gsY, p_pos_adim, p_V, i, k, scal_vect(:,:,:,sca), dt)
-            end do
-
-        end do
-    end do
-
-end subroutine advecY_calc_Vect
-
-
-!> Scalar advection alongZ (this procedure call the right solver, depending on the simulation setup)
-!!    @param[in]        dt          = time step
-!!    @param[in]        Vz          = velocity along y (could be discretised on a bigger mesh then the scalar)
-!!    @param[in,out]    scal3D      = scalar field to advect
-subroutine advecZ_calc_Vect(dt, Vz, scal_vect)
-
-    use advec_variables ! contains info about solver parameters and others.
-    use cart_topology   ! Description of mesh and of mpi topology
-    use advecZ          ! procdure devoted to advection along Z
-    use advec_common    ! some procedures common to advection along all directions
-
-    ! Input/Output
-    real(WP), intent(in)                                                :: dt
-    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vz
-    real(WP), dimension(:,:,:,:), intent(inout)                         :: scal_vect
-    ! Other local variables
-    integer, parameter                                  :: direction =3 ! current direction
-    integer                                             :: i,j          ! indice of the currend mesh point
-    integer                                             :: sca          ! indice of the currend scalar field
+    integer                                             :: j,k          ! indices of the current mesh point
     integer, dimension(2)                               :: ind_group    ! indice of the currend group of line (=(i,k) by default)
-    real(WP), dimension(N_proc(direction),gsZ(1),gsZ(2))  :: p_pos_adim ! adimensionned particles position
-    real(WP), dimension(N_proc(direction),gsZ(1),gsZ(2))  :: p_V        ! particles velocity
-
-    ! Allocate send_ind_min/max
-    if(allocated(send_group_min)) deallocate(send_group_min)
-    allocate(send_group_min(group_size(direction,1),group_size(direction,2)))
-    if(allocated(send_group_max)) deallocate(send_group_max)
-    allocate(send_group_max(group_size(direction,1),group_size(direction,2)))
-
-    ! Initialise the pointer for optimized remeshing
-    call AC_setup_alongZ()
+    real(WP),dimension(mesh_sc%N_proc(line_dir),gs(1),gs(2)) :: p_pos_adim   ! adimensionned particles position
+    real(WP),dimension(mesh_sc%N_proc(line_dir),gs(1),gs(2)) :: p_V          ! particles velocity
 
     ind_group = 0
 
-    do j = 1, N_proc(2), gsZ(2)
+    do k = 1, mesh_sc%N_proc(gp_dir2), gs(2)
         ind_group(2) = ind_group(2) + 1
         ind_group(1) = 0
-        do i = 1, N_proc(1), gsZ(1)
+        do j = 1, mesh_sc%N_proc(gp_dir1), gs(1)
             ind_group(1) = ind_group(1) + 1
 
             ! ===== Init particles =====
-            call advecZ_init_group(Vz, i, j, gsZ, p_pos_adim, p_V)
+            ! p_pos is used to store velocity at grid point
+            call advec_init_velo(V_comp, j, k, gs, p_pos_adim)
+            ! p_V = position at the middle point of the RK2 scheme
+            call AC_get_p_pos_adim(p_V, p_pos_adim, 0.5_WP*dt, mesh_sc%dx(line_dir), mesh_sc%N_proc(line_dir))
 
             ! ===== Advection =====
-            ! -- Compute velocity (with a RK2 scheme) --
-            call AC_velocity_interpol_group(dt, direction, gsZ, ind_group, p_pos_adim, p_V)
-            ! -- Advec particles --
-            p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+            ! -- Compute velocity (with a RK2 scheme): p_V = velocity at middle point position --
+            ! Note that p_pos is used as velocity component storage
+            call AC_interpol_lin_no_com(line_dir, gs, p_pos_adim, p_V)
+            ! p_V = velocity at the middle point position
+            ! -- Push particles --
+            call AC_get_p_pos_adim(p_pos_adim, p_V, dt, mesh_sc%dx(line_dir), mesh_sc%N_proc(line_dir))
+            ! Now p_pos = particle position and p_V = particle velocity
 
             ! ===== Remeshing =====
-            do sca = 1, size(scal_vect,4)
-                call advec_remesh_bis(direction, ind_group, gsZ, p_pos_adim, p_V, i,j,scal_vect(:,:,:,sca), dt)
-            end do
+            call advecX_remesh_no_com(ind_group, gs, p_pos_adim, p_V, j, k, vectX, dt)
+            call advecX_remesh_no_com(ind_group, gs, p_pos_adim, p_V, j, k, vectY, dt)
+            call advecX_remesh_no_com(ind_group, gs, p_pos_adim, p_V, j, k, vectZ, dt)
 
         end do
     end do
 
-end subroutine advecZ_calc_Vect
-
+end subroutine advec_vector_X_basic_no_com
 
 
-!> ===== Private procedure =====
 end module advec_Vect
-!> @}
diff --git a/HySoP/src/scalesInterface/particles/advec_common_group.F90 b/HySoP/src/scalesInterface/particles/advec_common_group.F90
new file mode 100644
index 000000000..735d5457d
--- /dev/null
+++ b/HySoP/src/scalesInterface/particles/advec_common_group.F90
@@ -0,0 +1,185 @@
+!USEFORTEST advec
+!> @addtogroup part
+!! @{
+!------------------------------------------------------------------------------
+!
+! MODULE: advec_common
+!
+!
+! DESCRIPTION:
+!> The module ``advec_common'' gathers functions and subroutines used to advect
+!! a scalar which are not specific to a direction
+!! @details
+!! This module gathers functions and routines used to advect a scalar which are
+!! not specific to a direction. This is a parallel implementation using MPI and
+!! the Cartesian topology it provides. It also contains the variables common to
+!! the solver along each direction and other generic variables used for the
+!! advection based on the particle method.
+!!
+!! Except for testing purposes, this module is not supposed to be used by the
+!! main code but only by the other advection modules. More precisely, a final
+!! user must only use the generic "advec" module, which contains all the
+!! interfaces to solve the advection equation with the particle method, and to
+!! choose the remeshing formula, the dimensional splitting and everything else.
+!!
+!! The module "test_advec" can be used in order to validate the procedures
+!! embedded in this module.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advec_common
+
+    use precision_tools
+
+    ! Velocity interpolation at particle position
+    use advec_common_interpol ,only:AC_interpol_lin, AC_interpol_lin_no_com, &
+      & AC_interpol_plus, AC_interpol_plus_no_com
+    ! Particles remeshing
+    use advec_common_remesh,only: AC_setup_init,                &
+            & AC_remesh_setup_alongX, AC_remesh_setup_alongY, AC_remesh_setup_alongZ,&
+            & AC_remesh_lambda_group, AC_remesh_limit_lambda_group, AC_remesh_Mprime_group
+
+    implicit none
+
+    ! To get particle position - if particles are created everywhere
+    interface AC_get_p_pos_adim
+      module procedure AC_init_pos, AC_get_pos_V, AC_get_pos_other_mesh, AC_get_pos_other_mesh_big
+    end interface AC_get_p_pos_adim
+    public :: AC_get_p_pos_adim
+    private:: AC_init_pos
+    private:: AC_get_pos_V
+    private:: AC_get_pos_other_mesh
+    private:: AC_get_pos_other_mesh_big
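+
+    ! Generic dispatch of AC_get_p_pos_adim, by actual argument list:
+    !   (p_pos)                                     -> AC_init_pos
+    !   (p_pos, p_V, dt, dx_sc, Np)                 -> AC_get_pos_V
+    !   (p_pos, p_V, dt, dx_sc, dx_V, Np)           -> AC_get_pos_other_mesh
+    !   (p_pos, p_V, dt, dx_sc, dx_V, Np, id1, id2) -> AC_get_pos_other_mesh_big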
+
+
+contains
+
+!> Init particle position at mesh point
+!!   @param[out] p_pos = adimensionned particle position
+subroutine AC_init_pos(p_pos)
+
+    real(WP), dimension(:,:,:), intent(out) :: p_pos
+
+    integer :: i2,i1,i_p
+
+    do i2 = 1, size(p_pos,3)
+      do i1 = 1, size(p_pos,2)
+        do i_p = 1, size(p_pos,1)
+          p_pos(i_p,i1,i2) = i_p
+        end do
+      end do
+    end do
+    !do i_p = 1, size(p_pos,1)
+    !  p_pos(i_p,:,:) = i_p
+    !end do
+
+end subroutine AC_init_pos
+
+
+!> Init particle position (adimensionned by dx) at initial position + dt*velocity
+!!   @param[out] p_pos  = adimensionned particle position
+!!   @param[in]  p_V    = particle velocity
+!!   @param[in]  dt     = time step
+!!   @param[in]  dx_sc  = spatial step for scalar
+!!   @param[in]  Np     = number of particles per line (= number of mesh points along the current direction)
+subroutine AC_get_pos_V(p_pos, p_V, dt, dx_sc, Np)
+
+    real(WP), dimension(:,:,:), intent(out) :: p_pos
+    real(WP), dimension(:,:,:), intent(in)  :: p_V
+    real(WP)                  , intent(in)  :: dt, dx_sc
+    integer                   , intent(in)  :: Np
+
+    integer :: i2,i1,i_p
+    real(WP):: coef
+
+    coef = dt/dx_sc
+    do i2 = 1, size(p_pos,3)
+      do i1 = 1, size(p_pos,2)
+        do i_p = 1, Np
+          p_pos(i_p,i1,i2) = i_p + coef*p_V(i_p,i1,i2)
+        end do
+      end do
+    end do
+    !do i_p = 1, size(p_pos,1)
+    !  p_pos(i_p,:,:) = i_p + coef*p_V(i_p,:,:)
+    !end do
+
+end subroutine AC_get_pos_V
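+
+! Example (for illustration only): with dt = 0.1 and dx_sc = 0.5, coef = 0.2;
+! a particle on the line with i_p = 3 and velocity p_V = 1.0 gets
+! p_pos = 3 + 0.2*1.0 = 3.2, i.e. a physical position of 3.2*dx_sc.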
+
+
+!> Init particle position (adimensionned by dx_V) at initial position +
+!! dt*velocity - use this variant if velocity and scalar resolutions are different.
+!!   @param[out] p_pos  = adimensionned particle position
+!!   @param[in]  p_V    = particle velocity
+!!   @param[in]  dt     = time step
+!!   @param[in]  dx_sc  = spatial step for scalar
+!!   @param[in]  dx_V   = spatial step for velocity
+!!   @param[in]  Np     = number of particles per line (= number of mesh points along the current direction)
+subroutine AC_get_pos_other_mesh(p_pos, p_V, dt, dx_sc, dx_V, Np)
+
+    real(WP), dimension(:,:,:), intent(out) :: p_pos
+    real(WP), dimension(:,:,:), intent(in)  :: p_V
+    real(WP)                  , intent(in)  :: dt, dx_sc, dx_V
+    integer                   , intent(in)  :: Np
+
+    integer :: i2,i1,i_p
+    real(WP):: coef1, coef2
+
+    coef1 = dx_sc/dx_V
+    coef2 = dt/dx_V
+    do i2 = 1, size(p_pos,3)
+      do i1 = 1, size(p_pos,2)
+        do i_p = 1, Np
+          p_pos(i_p,i1,i2) = (coef1*i_p) + (coef2*p_V(i_p,i1,i2))
+        end do
+      end do
+    end do
+    !do i_p = 1, size(p_pos,1)
+    !  p_pos(i_p,:,:) = (coef1*i_p) + (coef2*p_V(i_p,:,:))
+    !end do
+
+end subroutine AC_get_pos_other_mesh
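+
+! Example (for illustration only): with a velocity grid twice as coarse as the
+! scalar grid (dx_V = 2*dx_sc, so coef1 = 0.5), the particle starting at scalar
+! point i_p = 4 has velocity-grid coordinate 0.5*4 = 2, and its displacement
+! coef2*p_V = dt*p_V/dx_V is also counted in velocity-grid units.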
+
+
+!> Init particle position (adimensionned by dx_V) at initial position +
+!! dt*velocity - use this variant if velocity and scalar resolution are different
+!! and if V_comp contains not only the velocity for the current work item.
+!!   @param[out] p_pos  = adimensionned particle position
+!!   @param[in]  p_V    = particle velocity
+!!   @param[in]  dt     = time step
+!!   @param[in]  dx_sc  = spatial step for scalar
+!!   @param[in]  dx_V   = spatial step for velocity
+!!   @param[in]  id1,id2= coordinates of the current work item
+!!   @param[in]  Np     = number of particles per line (= number of mesh points along the current direction)
+subroutine AC_get_pos_other_mesh_big(p_pos, p_V, dt, dx_sc, dx_V, Np, id1, id2)
+
+    real(WP), dimension(:,:,:), intent(out) :: p_pos
+    real(WP), dimension(:,:,:), intent(in)  :: p_V
+    real(WP)                  , intent(in)  :: dt, dx_sc, dx_V
+    integer                   , intent(in)  :: id1, id2, Np
+
+    integer :: i2,i1,i_p, idir1, idir2
+    real(WP):: coef1, coef2
+
+    idir1 = id1 - 1
+    idir2 = id2 - 1
+
+    coef1 = dx_sc/dx_V
+    coef2 = dt/dx_V
+
+    do i2 = 1, size(p_pos,3)
+      do i1 = 1, size(p_pos,2)
+        do i_p = 1, Np
+          p_pos(i_p,i1,i2) = (coef1*i_p) + (coef2*p_V(i_p,i1+idir1,i2+idir2))
+        end do
+      end do
+    end do
+    !do i_p = 1, size(p_pos,1)
+    !  p_pos(i_p,:,:) = (coef1*i_p) + (coef2*p_V(i_p,:,:))
+    !end do
+
+end subroutine AC_get_pos_other_mesh_big
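+
+! Remark: compared to AC_get_pos_other_mesh, p_V here covers the whole group of
+! lines while p_pos only covers the current work item; the offsets idir1/idir2
+! shift the work-item indices (i1,i2) to the matching lines inside p_V.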
+
+end module advec_common
diff --git a/HySoP/src/scalesInterface/particles/advec_common_interpol.F90 b/HySoP/src/scalesInterface/particles/advec_common_interpol.F90
new file mode 100644
index 000000000..0817fb9cd
--- /dev/null
+++ b/HySoP/src/scalesInterface/particles/advec_common_interpol.F90
@@ -0,0 +1,1302 @@
+!USEFORTEST advec
+!> @addtogroup part
+!! @{
+!------------------------------------------------------------------------------
+!
+! MODULE: advec_common_interpol
+!
+!
+! DESCRIPTION:
+!> The module ``advec_common_interpol'' gathers functions and subroutines used to
+!! interpolate some quantities (velocity for instance) at particle positions.
+!! These tools are not specific to a direction.
+!! @details
+!! This module gathers functions and routines used to interpolate some field
+!! at particle positions. These subroutines are not specific to a direction.
+!! This is a parallel implementation using MPI and the Cartesian topology it
+!! provides.
+!!
+!! Except for testing purposes, this module is not supposed to be used by the
+!! main code but only by the other advection modules. More precisely, a final
+!! user must only use the generic "advec" module, which contains all the
+!! interfaces to solve the advection equation with the particle method, and to
+!! choose the remeshing formula, the dimensional splitting and everything else.
+!! Except for testing purposes, the other advection modules only have to
+!! include "advec_common".
+!!
+!! The module "test_advec" can be used in order to validate the procedures
+!! embedded in this module.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advec_common_interpol
+
+    use structure_tools
+    use advec_abstract_proc
+
+    implicit none
+
+
+    ! Information about the particles and their block
+    public
+
+
+    ! ===== Public procedures =====
+    !----- To interpolate velocity -----
+    public                        :: AC_interpol_lin
+    public                        :: AC_interpol_plus
+    public                        :: AC_interpol_lin_no_com
+    public                        :: AC_interpol_determine_communication
+
+    ! ===== Public variables =====
+
+    ! ===== Private variables =====
+
+
+contains
+
+! ===== Public procedure =====
+
+! ==================================================================================
+! ====================     Compute particle velocity (RK2)      ====================
+! ==================================================================================
+
+!> Interpolate the velocity field used in a RK2 scheme for particle advection -
+!! version for a group of (more than one) lines
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        gs          = size of a group (ie number of lines it gathers along the two other directions)
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in,out]    p_inter     = adimensionned particle position as input; interpolated field as output
+!!    @param[in]        V_comp      = field to interpolate at particle positions
+!! @details
+!!    A RK2 scheme is used to advect the particles: the middle point scheme. An
+!!    intermediary position "p_pos_bis(i) = p_pos(i) + V(i)*dt/2" is computed and then
+!!    the numerical velocity of each particle is computed as the interpolation of V at
+!!    this point. This field is used to advect the particles at second order in time:
+!!    p_pos(t+dt, i) = p_pos(i) + dt*p_V(i).
+!!    The group line index is used to ensure the uniqueness of each MPI message tag.
+!!    The interpolation is done for a group of lines, allowing communications to
+!!    be pooled. Considering a group of Na x Nb lines, the communications performed
+!!    by this algorithm are around (Na x Nb) times bigger than with the algorithm
+!!    which works on a single line, but also around (Na x Nb) times less frequent.
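+!!    In adimensionned form (positions divided by the space step dx along the
+!!    current direction), the scheme sketched above reads:
+!!        p_pos_bis   = p_pos + (dt/2) * V(p_pos) / dx
+!!        p_V         = interpolation of V at p_pos_bis
+!!        p_pos(t+dt) = p_pos + dt * p_V / dx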
+subroutine AC_interpol_lin(direction, gs, ind_group, V_comp, p_inter)
+
+    ! This code involves a copy of V_comp. One could use the 3D velocity field directly, but even in such a code
+    ! a memory copy would still be needed to send the velocity field to other processes: MPI sends contiguous memory values.
+
+    use mpi
+    use cart_topology   ! info about mesh and mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Ouput
+    integer, intent(in)                             :: direction    ! current direction
+    integer, dimension(2),intent(in)                :: gs           ! group size
+    integer, dimension(2), intent(in)               :: ind_group
+    real(WP), dimension(:,:,:), intent(inout)       :: p_inter
+    real(WP), dimension(:,:,:),intent(in),target    :: V_comp
+#ifdef BLOCKING_SEND_PLUS
+    real(WP)                                                    :: weight       ! interpolation weight storage
+#else
+    type(real_pter),dimension(mesh_sc%N_proc(direction),gs(1),gs(2))    :: Vp, Vm       ! Velocity on previous and next mesh point
+#endif
+    real(WP), dimension(:), allocatable, target                 :: V_buffer     ! Velocity buffer for positions outside of the local subdomain
+    integer, dimension(:), allocatable                          :: pos_in_buffer! position of each sender's data inside V_buffer
+    integer , dimension(gs(1), gs(2))           :: rece_ind_min ! minimal index of the mesh points for which I need the velocity value
+    integer , dimension(gs(1), gs(2))           :: rece_ind_max ! maximal index of the mesh points for which I need the velocity value
+    integer                                     :: ind, ind_com ! indices
+    integer                                     :: i1, i2       ! indices in the lines group
+    integer                                     :: pos, pos_old ! index of the mesh point which precedes the particle position
+    integer                                     :: proc_gap, gap! distance between my (MPI) coordinate and the coordinate of the
+                                                                ! process associated with a given position
+    integer                                     :: proc_end     ! final index of the process associated with the current pos
+    logical, dimension(2)                       :: myself
+    integer, dimension(:), allocatable          :: send_carto   ! cartography of what I have to send
+    integer                                     :: ind_1Dtable  ! index of my current position inside a one-dimensional table
+    integer                                     :: ind_for_i1   ! where to read the first coordinate (i1) of the current line inside the cartography?
+    real(WP), dimension(:), allocatable         :: send_buffer  ! to store what I have to send (in a contiguous way)
+    integer, dimension(gs(1),gs(2),2)           :: rece_gap     ! distance between me and the processes which send me information
+    integer, dimension(2 , 2)                   :: send_gap     ! distance between me and the processes to which I send information
+    integer, dimension(2)                       :: rece_gap_abs ! min (resp max) value of rece_gap(:,:,i) with i=1 (resp 2)
+    integer                                     :: com_size     ! size of message send/receive
+    integer, dimension(:), allocatable          :: size_com     ! size of message send/receive
+    integer                                     :: min_size     ! minimal size of cartography(:,proc_gap)
+    integer                                     :: max_size     ! maximal size of cartography(:,proc_gap)
+    integer                                     :: tag          ! mpi message tag
+    integer, dimension(:), allocatable          :: tag_proc     ! mpi message tag
+    integer                                     :: ierr         ! mpi error code
+#ifndef BLOCKING_SEND
+   integer, dimension(:), allocatable          :: s_request    ! mpi communication request (handle) of nonblocking send
+#endif
+    integer, dimension(:), allocatable          :: s_request_bis! mpi communication request (handle) of nonblocking send
+    integer, dimension(:), allocatable          :: rece_request ! mpi communication request (handle) of nonblocking receive
+    integer, dimension(MPI_STATUS_SIZE)         :: rece_status  ! mpi status (for mpi_wait)
+    integer, dimension(:,:), allocatable        :: cartography  ! cartography(proc_gap) contains the set of line indices in the block for which the
+                                                                ! current process requires data from proc_gap and, for each of these lines, the range
+                                                                ! of mesh points from which it requires the velocity values.
+
+    ! -- Initialisation --
+#ifndef BLOCKING_SEND_PLUS
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            do ind = 1, mesh_sc%N_proc(direction)
+                nullify(Vp(ind,i1,i2)%pter)
+                nullify(Vm(ind,i1,i2)%pter)
+            end do
+        end do
+    end do
+#endif
+
+    ! Compute range of the set of point where I need the velocity value
+    rece_ind_min = floor(p_inter(1,:,:))
+    rece_ind_max = floor(p_inter(mesh_sc%N_proc(direction),:,:)) + 1
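+    ! For a linear interpolation at adimensionned position p, the mesh points
+    ! floor(p) and floor(p)+1 are needed; assuming the particle ordering is
+    ! preserved along each line, the first and last particles of the line give
+    ! the minimal and maximal mesh indices involved.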
+
+    ! ===== Exchange velocity field if needed =====
+    ! It uses non-blocking messages so the computations can proceed during the communication process
+    ! -- What do I have to communicate? --
+    rece_gap(:,:,1) = floor(real(rece_ind_min-1)/mesh_sc%N_proc(direction))
+    rece_gap(:,:,2) = floor(real(rece_ind_max-1)/mesh_sc%N_proc(direction))
+    rece_gap_abs(1) = minval(rece_gap(:,:,1))
+    rece_gap_abs(2) = maxval(rece_gap(:,:,2))
+    max_size = 2 + gs(2)*(2+3*gs(1))
+    allocate(cartography(max_size,rece_gap_abs(1):rece_gap_abs(2)))
+    call AC_interpol_determine_communication(direction, ind_group, gs, send_gap,  &
+    & rece_gap, rece_gap_abs, cartography)
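+    ! cartography(:,proc_gap) is a sparse description of what I need from
+    ! proc_gap: a header (total number of velocity values, size of the
+    ! cartography itself, number of entries per i2), then the (i1 start, i1 end)
+    ! intervals of concerned lines, and finally the (min, max) mesh-point range
+    ! wanted for each concerned line; max_size = 2 + gs(2)*(2+3*gs(1)) bounds
+    ! its length in the worst case.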
+
+    ! -- Send messages about what I want --
+    allocate(s_request_bis(rece_gap_abs(1):rece_gap_abs(2)))
+    allocate(size_com(rece_gap_abs(1):rece_gap_abs(2)))
+    allocate(tag_proc(rece_gap_abs(1):rece_gap_abs(2)))
+    min_size = 2 + gs(2)
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+        if (neighbors(direction,proc_gap) /= D_rank(direction)) then
+            cartography(1,proc_gap) = 0
+            ! Use the cartography to know which lines are concerned
+            size_com(proc_gap) = cartography(2,proc_gap)
+            ! Range I want - store into the cartography
+            gap = proc_gap*mesh_sc%N_proc(direction)
+            ! Position in cartography(:,proc_gap) of the current i1 index
+            ind_for_i1 = min_size
+            do i2 = 1, gs(2)
+                do ind = ind_for_i1+1, ind_for_i1 + cartography(2+i2,proc_gap), 2
+                    do i1 = cartography(ind,proc_gap), cartography(ind+1,proc_gap)
+                        ! The interval starts from:
+                        cartography(size_com(proc_gap)+1,proc_gap) = max(rece_ind_min(i1,i2), gap+1) ! Fortran indices start from 1
+                        ! and ends at:
+                        cartography(size_com(proc_gap)+2,proc_gap) = min(rece_ind_max(i1,i2), gap+mesh_sc%N_proc(direction))
+                        ! update the number of elements to receive
+                        cartography(1,proc_gap) = cartography(1,proc_gap) &
+                                    & + cartography(size_com(proc_gap)+2,proc_gap) &
+                                    & - cartography(size_com(proc_gap)+1,proc_gap) + 1
+                        size_com(proc_gap) = size_com(proc_gap)+2
+                    end do
+                end do
+                ind_for_i1 = ind_for_i1 + cartography(2+i2,proc_gap)
+            end do
+            ! Tag = concatenation of (rank+1), ind_group(1), ind_group(2), direction and a unique Id.
+            tag_proc(proc_gap) = compute_tag(ind_group, tag_velo_range, direction, proc_gap)
+            ! Send message
+#ifdef PART_DEBUG
+            if(size_com(proc_gap)>max_size) then
+                print*, 'rank = ', cart_rank, ' -- wrong size for the cartography to send'
+                print*, 'carto size = ', size_com(proc_gap), ' is greater than the theoretical size ', &
+                    & max_size, ' and carto = ', cartography(:,proc_gap)
+            end if
+#endif
+            call mpi_ISsend(cartography(1,proc_gap), size_com(proc_gap), MPI_INTEGER,   &
+                & neighbors(direction,proc_gap), tag_proc(proc_gap), D_comm(direction), &
+                & s_request_bis(proc_gap),ierr)
+        end if
+    end do
+
+
+    ! -- Non blocking reception of the velocity field --
+    ! Allocate pos_in_buffer and use it to compute the size of V_buffer, so
+    ! that V_buffer can then be allocated.
+    allocate(pos_in_buffer(rece_gap_abs(1):rece_gap_abs(2)))
+    pos_in_buffer(rece_gap_abs(1)) = 1
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)-1
+        pos_in_buffer(proc_gap+1)= pos_in_buffer(proc_gap) + cartography(1,proc_gap)
+    end do
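+    ! pos_in_buffer is an exclusive prefix sum of the message sizes
+    ! cartography(1,:): it gives, for each sender, the offset at which its data
+    ! starts inside V_buffer (hence the total buffer size used just below).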
+    allocate(V_buffer(pos_in_buffer(rece_gap_abs(2)) &
+                & + cartography(1,rece_gap_abs(2))))
+    V_buffer = 0
+    allocate(rece_request(rece_gap_abs(1):rece_gap_abs(2)))
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+        if (neighbors(direction,proc_gap) /= D_rank(direction)) then
+            ! IIa - Compute reception tag
+            tag = compute_tag(ind_group, tag_velo_V, direction, -proc_gap)
+            ! IIb - Receive message
+            call mpi_Irecv(V_buffer(pos_in_buffer(proc_gap)), cartography(1,proc_gap), MPI_DOUBLE_PRECISION, &
+                    & neighbors(direction,proc_gap), tag, D_comm(direction), rece_request(proc_gap), ierr)
+        end if
+    end do
+
+    ! -- Send the velocity field to the processes which need it --
+#ifndef BLOCKING_SEND
+   allocate(s_request(send_gap(1,1):send_gap(1,2)))
+#endif
+    allocate(send_carto(max_size))
+! XXX Todo: count the number of messages to receive, then process them in the
+! order in which they arrive via MPI_ANY_SOURCE? But then rank and coordinates
+! must be linked... which means adding a call to mpi_cart_coords... or sending
+! the rank inside the cartography!!
+! To be decided.
+    do proc_gap = send_gap(1,1), send_gap(1,2)
+        if (neighbors(direction,proc_gap) /= D_rank(direction)) then
+            ! I - Receive messages about what I have to send
+            ! Ia - Compute reception tag = concatenation of (rank+1), ind_group(1), ind_group(2), direction and a unique Id.
+            tag = compute_tag(ind_group, tag_velo_range, direction, -proc_gap)
+            ! Ib - Receive the message
+            call mpi_recv(send_carto(1), max_size, MPI_INTEGER, neighbors(direction,proc_gap), &
+              & tag, D_comm(direction), rece_status, ierr)
+            ! II - Send it
+            ! IIa - Create send buffer
+            allocate(send_buffer(send_carto(1)))
+            gap = proc_gap*mesh_sc%N_proc(direction)
+            com_size = 0
+            ind_1Dtable = send_carto(2)
+            ! Position in cartography(:,proc_gap) of the current i1 index
+            ind_for_i1 = min_size
+            do i2 = 1, gs(2)
+                do ind = ind_for_i1+1, ind_for_i1 + send_carto(2+i2), 2
+                    do i1 = send_carto(ind), send_carto(ind+1)
+                        do ind_com = send_carto(ind_1Dtable+1)+gap, send_carto(ind_1Dtable+2)+gap ! index inside the current line
+                            com_size = com_size + 1
+                            send_buffer(com_size) = V_comp(ind_com, i1,i2)
+                        end do
+                        ind_1Dtable = ind_1Dtable + 2
+                    end do
+                end do
+                ind_for_i1 = ind_for_i1 + send_carto(2+i2)
+            end do
+            ! IIa_bis - check correctness
+#ifdef PART_DEBUG
+            if(com_size/=send_carto(1)) then
+                print*, 'rank = ', cart_rank, ' -- wrong size for the velocity field to send'
+                print*, 'computed size = ', com_size, ' does not match the received size ', &
+                    & send_carto(1), ' and carto = ', send_carto(:)
+            end if
+#endif
+            ! IIb - Compute send tag
+            tag = compute_tag(ind_group, tag_velo_V, direction, proc_gap)
+            ! IIc - Send message
+#ifdef BLOCKING_SEND
+            call mpi_Send(send_buffer(1), com_size, MPI_DOUBLE_PRECISION,  &
+                    & neighbors(direction,proc_gap), tag, D_comm(direction),&
+                    & ierr)
+#else
+           call mpi_ISend(send_buffer(1), com_size, MPI_DOUBLE_PRECISION,  &
+                   & neighbors(direction,proc_gap), tag, D_comm(direction),&
+                   & s_request(proc_gap), ierr)
+#endif
+            deallocate(send_buffer)
+        end if
+    end do
+    deallocate(send_carto)
+
+    !-- Free some ISsend buffers and some arrays --
+! XXX Todo: prefer a call to MPI_WAITALL, with s_request_bis initialised to
+! MPI_REQUEST_NULL, and remove the loop AND the if.
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+        if (neighbors(direction,proc_gap) /= D_rank(direction)) then
+            call MPI_WAIT(s_request_bis(proc_gap),rece_status,ierr)
+        end if
+    end do
+    deallocate(s_request_bis)
+    deallocate(cartography) ! We do not need it anymore
+    deallocate(tag_proc)
+    deallocate(size_com)
+
+#ifdef BLOCKING_SEND_PLUS
+    ! -- Compute the interpolated velocity --
+    ! Check that communications are done
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+        if (neighbors(direction,proc_gap)/=D_rank(direction)) then
+            call mpi_wait(rece_request(proc_gap), rece_status, ierr)
+        end if
+    end do
+    deallocate(rece_request)
+#endif
+
+    ! ===== Compute the interpolated velocity =====
+    ! -- Compute the interpolation weight and update the pointers Vp and Vm --
+    pos_in_buffer = pos_in_buffer - 1
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            ! Initialisation of the recurrence process
+            ind = 1
+            pos = floor(p_inter(ind,i1,i2))
+#ifndef BLOCKING_SEND_PLUS
+            p_inter(ind,i1,i2) = p_inter(ind,i1,i2)-pos
+#else
+            weight = p_inter(ind,i1,i2)-pos
+#endif
+            ! Vm = V(pos)
+            proc_gap = floor(real(pos-1)/mesh_sc%N_proc(direction))
+            if (neighbors(direction,proc_gap) == D_rank(direction)) then
+#ifndef BLOCKING_SEND_PLUS
+              Vm(ind,i1,i2)%pter => V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+#else
+              p_inter(ind,i1,i2) = (1._WP-weight)*V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+#endif
+              myself(1) = .true.
+            else
+              pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1  ! XXX New version only
+#ifndef BLOCKING_SEND_PLUS
+              Vm(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap))
+#else
+              p_inter(ind,i1,i2) = (1._WP-weight)*V_buffer(pos_in_buffer(proc_gap))
+#endif
+              myself(1) = .false.
+            end if
+            ! Vp = V(pos+1)
+            gap = floor(real(pos+1-1)/mesh_sc%N_proc(direction))
+            if (neighbors(direction,gap) == D_rank(direction)) then
+#ifndef BLOCKING_SEND_PLUS
+              Vp(ind,i1,i2)%pter => V_comp(pos+1-gap*mesh_sc%N_proc(direction), i1,i2)
+#else
+              p_inter(ind,i1,i2) = p_inter(ind,i1,i2) + weight*V_comp(pos+1-gap*mesh_sc%N_proc(direction), i1,i2)
+#endif
+            else
+              pos_in_buffer(gap) = pos_in_buffer(gap) + 1  ! XXX New version only
+#ifndef BLOCKING_SEND_PLUS
+              Vp(ind,i1,i2)%pter => V_buffer(pos_in_buffer(gap))
+#else
+              p_inter(ind,i1,i2) = p_inter(ind,i1,i2) + weight*V_buffer(pos_in_buffer(gap))
+#endif
+            end if
+            pos_old = pos
+            proc_end = (proc_gap+1)*mesh_sc%N_proc(direction)
+            myself(2) = (neighbors(direction,proc_gap+1) == D_rank(direction))
+
+
+            ! XXX New version XXX
+            ! Following index: new version
+            ind = 2
+            if (ind<=mesh_sc%N_proc(direction)) pos = floor(p_inter(ind,i1,i2))
+            do while (ind<=mesh_sc%N_proc(direction))
+              !pos = floor(p_inter(ind,i1,i2))
+              if(myself(1)) then
+                ! -- Inside the current block, it is always the same --
+                do while ((pos<proc_end).and.(ind<mesh_sc%N_proc(direction)))
+                  ! Computation for current step
+#ifndef BLOCKING_SEND_PLUS
+                  p_inter(ind,i1,i2) = p_inter(ind,i1,i2)-pos
+                  Vm(ind,i1,i2)%pter => V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+                  Vp(ind,i1,i2)%pter => V_comp(pos+1-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+#else
+                  !weight = p_inter(ind,i1,i2)-pos
+                  !p_inter = weight*Vp + (1-weight)*Vm = weight*(Vp-Vm) + Vm
+                  p_inter(ind,i1,i2) = (p_inter(ind,i1,i2)-pos)*(V_comp(pos+1-proc_gap*mesh_sc%N_proc(direction), i1,i2) &
+                    & - V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)) + V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+#endif
+                  ! Prepare next step
+                  pos_old = pos
+                  ind = ind + 1
+                  pos = floor(p_inter(ind,i1,i2))
+                end do ! ((pos<proc_end).and.(ind<mesh_sc%N_proc(direction)))
+                ! -- When we are exactly on the subdomain transition --
+                do while ((pos==proc_end).and.(ind<mesh_sc%N_proc(direction)))
+#ifndef BLOCKING_SEND_PLUS
+                  p_inter(ind,i1,i2) = p_inter(ind,i1,i2)-pos
+                  ! Vm is in the same sub-domain
+                  Vm(ind,i1,i2)%pter => V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+#endif
+                  ! Vp is in the next one (proc_gap+1)
+                  if(myself(2)) then
+#ifndef BLOCKING_SEND_PLUS
+                    Vp(ind,i1,i2)%pter => V_comp(pos+1-(proc_gap+1)*mesh_sc%N_proc(direction), i1,i2)
+#else
+                    p_inter(ind,i1,i2) = ((p_inter(ind,i1,i2)-pos)*         &
+                      & (V_comp(pos+1-(proc_gap+1)*mesh_sc%N_proc(direction), i1,i2) &
+                      & - V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2))    )&
+                      &  + V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+#endif
+                  else
+                    ! If pos = pos_old, we must have pos_in_buffer(proc_gap+1) += 0 (no changes)
+                    ! Else pos>pos_old, we must have pos_in_buffer(proc_gap+1) += 1
+                    ! We use that min(1,pos-pos_old)   = 0 if pos=pos_old, 1 else
+                    pos_in_buffer(proc_gap+1) = pos_in_buffer(proc_gap+1) + min(1,pos-pos_old)
+#ifndef BLOCKING_SEND_PLUS
+                    Vp(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap+1))
+#else
+                    p_inter(ind,i1,i2) = ((p_inter(ind,i1,i2)-pos)*(V_buffer(pos_in_buffer(proc_gap+1)) &
+                    & - V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2))) + V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+#endif
+                  end if
+                  ! Prepare next step
+                  pos_old = pos
+                  ind = ind + 1
+                  pos = floor(p_inter(ind,i1,i2))
+                end do
+                ! -- When we reach the end of the sub-domain OR the end of the particle line --
+                if (pos>proc_end) then  ! Change of subdomain
+                  ! We have reached the next subdomain => update values
+                  proc_gap = floor(real(pos-1)/mesh_sc%N_proc(direction)) ! "proc_gap = proc_gap + 1" does not work if N_proc = 1 and pos-pos_old = 2.
+                  myself(1) = (neighbors(direction,proc_gap) == D_rank(direction)) ! For the same reason as the line just above, we do not use "myself(1) = myself(2)"
+                  proc_end = (proc_gap+1)*mesh_sc%N_proc(direction)
+                  myself(2) = (neighbors(direction,proc_gap+1) == D_rank(direction))
+                  ! ... and go on with the next loop!
+                else ! ind == N_proc and no change of subdomain
+#ifndef BLOCKING_SEND_PLUS
+                  ! Computation for current step
+                  p_inter(ind,i1,i2) = p_inter(ind,i1,i2)-pos
+                  ! Vm
+                  Vm(ind,i1,i2)%pter => V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+#endif
+                  ! Vp
+                  if(pos<proc_end) then
+#ifndef BLOCKING_SEND_PLUS
+                    Vp(ind,i1,i2)%pter => V_comp(pos+1-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+#else
+                    p_inter(ind,i1,i2) = (p_inter(ind,i1,i2)-pos)*(V_comp(pos+1-proc_gap*mesh_sc%N_proc(direction), i1,i2) &
+                      & - V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)) + V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+#endif
+                  else ! pos+1 is in the next subdomain: use the same algorithm as in lines 377-390
+                    if(myself(2)) then
+#ifndef BLOCKING_SEND_PLUS
+                      Vp(ind,i1,i2)%pter => V_comp(pos+1-(proc_gap+1)*mesh_sc%N_proc(direction), i1,i2)
+#else
+                      p_inter(ind,i1,i2) = (p_inter(ind,i1,i2)-pos)*(V_comp(pos+1-(proc_gap+1)*mesh_sc%N_proc(direction), i1,i2) &
+                        & - V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)) + V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+#endif
+                    else
+                      ! If pos = pos_old, we must have pos_in_buffer(proc_gap+1) += 0 (no changes)
+                      ! Else pos>pos_old, we must have pos_in_buffer(proc_gap+1) += 1
+                      ! We use that min(1,pos-pos_old)   = 0 if pos=pos_old, 1 else
+                      pos_in_buffer(proc_gap+1) = pos_in_buffer(proc_gap+1) + min(1,pos-pos_old)
+#ifndef BLOCKING_SEND_PLUS
+                      Vp(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap+1))
+#else
+                      p_inter(ind,i1,i2) = (p_inter(ind,i1,i2)-pos)*(V_buffer(pos_in_buffer(proc_gap+1)) &
+                        & - V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)) + V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+#endif
+                    end if
+                  end if
+                  ! Go to the next (i1,i2) value: ind must be greater than N_proc
+                  ind = ind +1
+                end if
+              else ! => not myself(1)
+                ! -- Inside the current block, it is always the same --
+                do while ((pos<proc_end).and.(ind<mesh_sc%N_proc(direction)))
+                  ! Computation for current step
+                  pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + pos-pos_old
+#ifndef BLOCKING_SEND_PLUS
+                  p_inter(ind,i1,i2) = p_inter(ind,i1,i2)-pos
+                  Vm(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap)-1)
+                  Vp(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap))
+#else
+                  p_inter(ind,i1,i2) = ((p_inter(ind,i1,i2)-pos)*(V_buffer(pos_in_buffer(proc_gap)) &
+                      & - V_buffer(pos_in_buffer(proc_gap)-1))) + V_buffer(pos_in_buffer(proc_gap)-1)
+#endif
+                  ! Prepare next step
+                  pos_old = pos
+                  ind = ind + 1
+                  pos = floor(p_inter(ind,i1,i2))
+                end do
+                ! -- When we are exactly on the subdomain transition --
+                do while ((pos==proc_end).and.(ind<mesh_sc%N_proc(direction)))
+                  ! If pos = pos_old, we must have  pos_in_buffer(proc_gap) += 0
+                  !                             and pos_in_buffer(proc_gap+1) += 0 (no changes)
+                  ! Else pos>pos_old, we must have pos_in_buffer(proc_gap) += (pos-pos_old -1)
+                  !                             and pos_in_buffer(proc_gap+1) += 1
+                  ! We use max(0,pos-pos_old-1) = 0 if pos=pos_old, (pos-pos_old-1) else.
+                  !    and min(1,pos-pos_old)   = 0 if pos=pos_old, 1 else
+                  ! Vm is in the same sub-domain
+                  pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + max(0,pos-pos_old-1)
+#ifndef BLOCKING_SEND_PLUS
+                 p_inter(ind,i1,i2) = p_inter(ind,i1,i2)-pos
+                 Vm(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap))
+#endif
+                  ! Vp is in the next one
+                  if(myself(2)) then
+#ifndef BLOCKING_SEND_PLUS
+                    Vp(ind,i1,i2)%pter => V_comp(pos+1-(proc_gap+1)*mesh_sc%N_proc(direction), i1,i2)
+#else
+                    p_inter(ind,i1,i2) = ((p_inter(ind,i1,i2)-pos)* &
+                      & (V_comp(pos+1-(proc_gap+1)*mesh_sc%N_proc(direction), i1,i2)   &
+                      & - V_buffer(pos_in_buffer(proc_gap))              ) )&
+                      & + V_buffer(pos_in_buffer(proc_gap))
+#endif
+                  else
+                    pos_in_buffer(proc_gap+1) = pos_in_buffer(proc_gap+1) + min(1,pos-pos_old)
+#ifndef BLOCKING_SEND_PLUS
+                    Vp(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap+1))
+#else
+                    p_inter(ind,i1,i2) = ((p_inter(ind,i1,i2)-pos)* &
+                      & (V_buffer(pos_in_buffer(proc_gap+1))      &
+                      & - V_buffer(pos_in_buffer(proc_gap)) )    )&
+                      & + V_buffer(pos_in_buffer(proc_gap))
+#endif
+                  end if
+                  ! Prepare next step
+                  pos_old = pos
+                  ind = ind + 1
+                  pos = floor(p_inter(ind,i1,i2))
+                end do
+                ! -- When we reach the end of the sub-domain OR the end of the particle line --
+                if (pos>proc_end) then  ! Change of subdomain
+                  ! We have reached the next subdomain => update values
+                  proc_gap = floor(real(pos-1)/mesh_sc%N_proc(direction)) ! "proc_gap = proc_gap + 1" does not work if N_proc = 1 and pos-pos_old = 2.
+                  myself(1) = (neighbors(direction,proc_gap) == D_rank(direction)) ! For the same reason as the line just above, we do not use "myself(1) = myself(2)"
+                  proc_end = (proc_gap+1)*mesh_sc%N_proc(direction)
+                  myself(2) = (neighbors(direction,proc_gap+1) == D_rank(direction))
+                  ! ... and go on with the next loop!
+                else ! ind == N_proc and no change of subdomain
+                  ! Computation for current step
+#ifndef BLOCKING_SEND_PLUS
+                 p_inter(ind,i1,i2) = p_inter(ind,i1,i2)-pos
+#endif
+                  if (pos<proc_end) then
+                    pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + pos-pos_old
+#ifndef BLOCKING_SEND_PLUS
+                    Vm(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap)-1)
+                    Vp(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap))
+#else
+                    p_inter(ind,i1,i2) = ((p_inter(ind,i1,i2)-pos)*(V_buffer(pos_in_buffer(proc_gap)) &
+                        & - V_buffer(pos_in_buffer(proc_gap)-1))) + V_buffer(pos_in_buffer(proc_gap)-1)
+#endif
+                  else ! pos=proc_end: same as in lines 440 to 462
+                    pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + max(0,pos-pos_old-1)
+#ifndef BLOCKING_SEND_PLUS
+                    Vm(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap))
+#endif
+                    ! Vp is in the next one
+                    if(myself(2)) then
+#ifndef BLOCKING_SEND_PLUS
+                      Vp(ind,i1,i2)%pter => V_comp(pos+1-(proc_gap+1)*mesh_sc%N_proc(direction), i1,i2)
+#else
+                      p_inter(ind,i1,i2) = ((p_inter(ind,i1,i2)-pos)*(V_comp(pos+1-(proc_gap+1)*mesh_sc%N_proc(direction), i1,i2) &
+                        & - V_buffer(pos_in_buffer(proc_gap)))) + V_buffer(pos_in_buffer(proc_gap))
+#endif
+                    else
+                      pos_in_buffer(proc_gap+1) = pos_in_buffer(proc_gap+1) + min(1,pos-pos_old)
+#ifndef BLOCKING_SEND_PLUS
+                      Vp(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap+1))
+#else
+                      p_inter(ind,i1,i2) = ((p_inter(ind,i1,i2)-pos)*(V_buffer(pos_in_buffer(proc_gap+1)) &
+                        & - V_buffer(pos_in_buffer(proc_gap)))) + V_buffer(pos_in_buffer(proc_gap))
+#endif
+                    end if
+                  end if
+                  ! Go to the next (i1,i2) value: ind must be greater than N_proc
+                  ind = ind +1
+                end if  ! pos>proc_end
+              end if ! myself(1)
+            end do ! (ind<mesh_sc%N_proc(direction)
+
+        end do ! loop on the first coordinate (i1) of a line inside the block of lines
+    end do ! loop on the second coordinate (i2) of a line inside the block of lines
+
+    deallocate(pos_in_buffer)   ! We do not need it anymore
+
+#ifndef BLOCKING_SEND_PLUS
+    ! -- Compute the interpolated velocity --
+    ! Check if communications are done
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+        if (neighbors(direction,proc_gap)/=D_rank(direction)) then
+            call mpi_wait(rece_request(proc_gap), rece_status, ierr)
+        end if
+    end do
+    deallocate(rece_request)
+#endif
+
+    ! Then compute the field
+#ifndef BLOCKING_SEND_PLUS
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            do ind = 1, mesh_sc%N_proc(direction)
+                p_inter(ind,i1,i2) = p_inter(ind,i1,i2)*Vp(ind,i1,i2)%pter + (1.-p_inter(ind,i1,i2))*Vm(ind,i1,i2)%pter
+            end do
+        end do
+    end do
+#endif
+
+
+    ! ===== Free memory =====
+    ! -- Pointers --
+#ifndef BLOCKING_SEND_PLUS
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            do ind = 1, mesh_sc%N_proc(direction)
+                nullify(Vp(ind,i1,i2)%pter)
+                nullify(Vm(ind,i1,i2)%pter)
+            end do
+        end do
+    end do
+#endif
+#ifndef BLOCKING_SEND
+    ! -- Mpi internal buffer for non blocking communication --
+    do proc_gap = send_gap(1,1), send_gap(1,2)
+        if (neighbors(direction,proc_gap) /= D_rank(direction)) then
+            call MPI_WAIT(s_request(proc_gap),rece_status,ierr)
+        end if
+    end do
+    deallocate(s_request)
+#endif
+    ! -- Deallocate dynamic array --
+    deallocate(V_buffer)
+
+end subroutine AC_interpol_lin
+
+
+!> Determine the set of processes which will send me information during the velocity interpolation and compute
+!! for each of these processes the range of wanted data.
+!!    @param[in]    direction       = current direction (1 = along X, 2 = along Y, 3 = along Z)
+!!    @param[in]    gs              = size of a group (ie number of lines it gathers along the two other directions)
+!!    @param[in]    ind_group       = coordinate of the current group of lines
+!!    @param[out]   send_gap        = gap between my coordinate and the processes of minimal and maximal coordinate to which I will send information
+!!    @param[in]    rece_gap        = gap between my coordinate and the processes of minimal and maximal coordinate which will send information to me
+!!    @param[in]    rece_gap_abs    = min (resp max) value of rece_gap(:,:,i) with i=1 (resp 2)
+!!    @param[out]   cartography     = cartography(proc_gap) contains the set of line indices in the block for which the
+!!                                    current process requires data from proc_gap and, for each of these lines, the range
+!!                                    of mesh points from which it requires the velocity values.
+!! @details
+!!    Works on a group of lines of size (gs(1) x gs(2)).
+!!    Obtains the list of processes which need a part of my local velocity field
+!!    to interpolate the velocity used in the RK2 scheme to advect their particles.
+!!    At the same time, it computes, for each process from which I need a part
+!!    of the velocity field, the range of mesh points where I want data, and stores it
+!!    using some sparse matrix techniques (see the cartography defined in the
+!!    algorithm documentation)
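+!!
+!!    Illustrative sketch of the cartography layout (hypothetical values, not
+!!    taken from a real run): with gs = (3,2), the header size is min_size = 2+gs(2) = 4.
+!!    Suppose only line i1 = 2 of column i2 = 1 needs data from the process at
+!!    distance proc_gap, on the mesh range [5;9]. After this subroutine:
+!!      cartography(3,proc_gap) = 2         ! one interval (2 entries) in column i2=1
+!!      cartography(4,proc_gap) = 0         ! no concerned line in column i2=2
+!!      cartography(5:6,proc_gap) = (/2,2/) ! the interval of line indices i1
+!!      cartography(2,proc_gap) = 6         ! number of elements filled so far
+!!    The caller (AC_interpol_plus) then appends the wanted range (5,9) for this
+!!    line and sets cartography(1,proc_gap) = 5 velocity values to receive.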
+subroutine AC_interpol_determine_communication(direction, ind_group, gs, send_gap,  &
+    & rece_gap, rece_gap_abs, cartography)
+! XXX Works only for periodic boundary conditions.
+
+    use cart_topology   ! info about mesh and mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+    use mpi
+
+
+    ! Input/Output
+    integer, intent(in)                                 :: direction
+    integer, dimension(2), intent(in)                   :: ind_group
+    integer, dimension(2), intent(in)                   :: gs
+    integer, dimension(gs(1), gs(2), 2), intent(in)     :: rece_gap
+    integer, dimension(2), intent(in)                   :: rece_gap_abs ! min (resp max) value of rece_gap(:,:,i) with i=1 (resp 2)
+    integer, dimension(2, 2), intent(out)               :: send_gap
+    integer, dimension(2+gs(2)*(2+3*gs(1)), &
+        & rece_gap_abs(1):rece_gap_abs(2)), intent(out) :: cartography
+    ! Others
+    integer                             :: proc_gap         ! gap between a process coordinate (along the current
+                                                            ! direction) in the mpi topology and my coordinate
+    integer, dimension(gs(1), gs(2))    :: rece_gapP        ! gap between the coordinate of the previous process (in the current direction)
+                                                            ! and the processes of maximal coordinate which will receive information from it
+    integer, dimension(gs(1), gs(2))    :: rece_gapN        ! same as above but for the next process
+    integer                             :: send_request_gh  ! mpi request of non-blocking send
+    integer                             :: send_request_gh2 ! mpi request of non-blocking send
+    integer                             :: ierr             ! mpi error code
+    integer, dimension(2)               :: tag_table        ! some mpi message tags
+    logical, dimension(:,:), allocatable:: test_request     ! for mpi non-blocking communication
+    integer, dimension(:,:), allocatable:: send_request     ! for mpi non-blocking send
+    integer                             :: ind1, ind2       ! indices of the current line inside the group
+    integer,dimension(2)                :: rece_buffer      ! reception buffer (gap and number of lines)
+    integer, dimension(:,:), allocatable:: first, last      ! for each process, gap and number of lines for which I am the first (resp. the last) to require information
+    integer                             :: min_size         ! size of the cartography header, ie first index where line intervals are stored
+    integer                             :: gp_size          ! group size
+    logical                             :: begin_interval   ! are we at the start of an interval?
+    logical                             :: not_myself       ! is the target process myself?
+    integer, dimension(MPI_STATUS_SIZE) :: statut
+
+    send_gap(1,1) = 3*mesh_sc%N(direction)
+    send_gap(1,2) = -3*mesh_sc%N(direction)
+    send_gap(2,:) = 0
+    gp_size = gs(1)*gs(2)
+
+    ! ===== Communicate with my neighbors -> obtain ghosts =====
+    ! Inform my neighbors about the processes from which I need information
+    tag_table = compute_tag(ind_group, tag_obtrec_ghost_NP, direction)
+    call mpi_ISsend(rece_gap(1,1,1), gp_size, MPI_INTEGER, neighbors(direction,-1), tag_table(1), &
+        & D_comm(direction), send_request_gh, ierr)
+    call mpi_ISsend(rece_gap(1,1,2), gp_size, MPI_INTEGER, neighbors(direction,1), tag_table(2), &
+        & D_comm(direction), send_request_gh2, ierr)
+    ! Receive the same message from my neighbors
+    call mpi_recv(rece_gapN(1,1), gp_size, MPI_INTEGER, neighbors(direction,1), tag_table(1), D_comm(direction), statut, ierr)
+    call mpi_recv(rece_gapP(1,1), gp_size, MPI_INTEGER, neighbors(direction,-1), tag_table(2), D_comm(direction), statut, ierr)
+
+    ! ===== Compute if I am first or last and determine the cartography =====
+    min_size = 2 + gs(2)
+    ! Initialize first and last to determine if I am the first or the last process (considering the current direction)
+        ! to require information from each process
+    allocate(first(2,rece_gap_abs(1):rece_gap_abs(2)))
+    first(2,:) = 0  ! number of lines for which I am the first
+    allocate(last(2,rece_gap_abs(1):rece_gap_abs(2)))
+    last(2,:) = 0   ! number of lines for which I am the last
+    ! Initialize cartography
+    cartography(1,:) = 0            ! number of velocity values to receive
+    cartography(2,:) = min_size     ! number of elements to send when sending the cartography
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+        first(1,proc_gap) = -proc_gap
+        last(1,proc_gap) = -proc_gap
+        not_myself = (neighbors(direction,proc_gap) /= D_rank(direction)) ! Is the target processus myself ?
+        do ind2 = 1, gs(2)
+            cartography(2+ind2,proc_gap) = 0    ! 2 x number of intervals of concerned lines in the column ind2
+            begin_interval = .true.
+            do ind1 = 1, gs(1)
+                ! Does proc_gap belong to [rece_gap(ind1,ind2,1);rece_gap(ind1,ind2,2)]?
+                if((proc_gap>=rece_gap(ind1,ind2,1)).and.(proc_gap<=rece_gap(ind1,ind2,2))) then
+                    ! Compute if I am the first.
+                    if (proc_gap>rece_gapP(ind1,ind2)-1) then
+                        first(2,proc_gap) =  first(2,proc_gap)+1
+                    end if
+                    ! Compute if I am the last.
+                    if (proc_gap<rece_gapN(ind1,ind2)+1) then
+                        last(2,proc_gap) =  last(2,proc_gap)+1
+                    end if
+                    ! Update cartography // not needed if the target process is myself
+                    if (not_myself) then
+                        if (begin_interval) then
+                            cartography(2+ind2,proc_gap) =  cartography(2+ind2,proc_gap)+2
+                            cartography(cartography(2,proc_gap)+1,proc_gap) = ind1
+                            cartography(2,proc_gap) = cartography(2,proc_gap) + 2
+                            cartography(cartography(2,proc_gap),proc_gap) = ind1
+                            begin_interval = .false.
+                        else
+                            cartography(cartography(2,proc_gap),proc_gap) = ind1
+                        end if
+                    end if
+                else
+                    begin_interval = .true.
+                end if
+            end do
+        end do
+    end do
+
+    ! ===== Free Isend buffer from first communication =====
+    call MPI_WAIT(send_request_gh,statut,ierr)
+    call MPI_WAIT(send_request_gh2,statut,ierr)
+
+    ! ===== Send information about first and last  =====
+    tag_table = compute_tag(ind_group, tag_obtrec_NP, direction)
+    allocate(send_request(rece_gap_abs(1):rece_gap_abs(2),2))
+    allocate(test_request(rece_gap_abs(1):rece_gap_abs(2),2))
+    test_request = .false.
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+        ! Am I the first?
+        if (first(2,proc_gap)>0) then
+            if(neighbors(direction,proc_gap)/= D_rank(direction)) then
+                call mpi_ISsend(first(1,proc_gap), 2, MPI_INTEGER, neighbors(direction,proc_gap),&
+                        & tag_table(1), D_comm(direction), send_request(proc_gap,1), ierr)
+                test_request(proc_gap,1) = .true.
+            else
+                send_gap(1,1) = min(send_gap(1,1), -proc_gap)
+                send_gap(2,1) = send_gap(2,1) + first(2,proc_gap)
+            end if
+        end if
+        ! Am I the last?
+        if (last(2,proc_gap)>0) then
+            if(neighbors(direction,proc_gap)/= D_rank(direction)) then
+                call mpi_ISsend(last(1,proc_gap), 2, MPI_INTEGER, neighbors(direction,proc_gap),&
+                        &  tag_table(2), D_comm(direction), send_request(proc_gap,2), ierr)
+                test_request(proc_gap,2) = .true.
+            else
+                send_gap(1,2) = max(send_gap(1,2), -proc_gap)
+                send_gap(2,2) = send_gap(2,2) + last(2,proc_gap)
+            end if
+        end if
+    end do
+
+
+
+    ! ===== Receive information from the first and the last processes which need a part of my local velocity field =====
+    do while(send_gap(2,1) < gp_size)
+        call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
+        send_gap(1,1) = min(send_gap(1,1), rece_buffer(1))
+        send_gap(2,1) = send_gap(2,1) + rece_buffer(2)
+    end do
+    do while(send_gap(2,2) < gp_size)
+        call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
+        send_gap(1,2) = max(send_gap(1,2), rece_buffer(1))
+        send_gap(2,2) = send_gap(2,2) + rece_buffer(2)
+    end do
+
+    ! ===== Free Isend buffer =====
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+        if (test_request(proc_gap,1)) call MPI_WAIT(send_request(proc_gap,1),statut,ierr)
+        if (test_request(proc_gap,2)) call MPI_WAIT(send_request(proc_gap,2),statut,ierr)
+    end do
+    deallocate(send_request)
+    deallocate(test_request)
+
+    ! ===== Deallocate array =====
+    deallocate(first)
+    deallocate(last)
+
+end subroutine AC_interpol_determine_communication
+
+
+!> Interpolate the velocity field used in an RK2 scheme for particle advection -
+!! version for directions with no domain subdivision and thus no required
+!! communication
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        gs          = size of a group (ie number of lines it gathers along the two other directions)
+!!    @param[in]        V_comp      = velocity to interpolate
+!!    @param[in,out]    p_V         = particle positions on input and particle velocities (along the current direction) on output
+!! @details
+!!    An RK2 scheme is used to advect the particles: the middle point scheme. An
+!!    intermediate position "p_pos_bis(i) = p_pos(i) + V(i)*dt/2" is computed and then
+!!    the numerical velocity of each particle is computed as the interpolation of V at
+!!    this point. This field is used to advect the particles at second order in time:
+!!    p_pos(t+dt, i) = p_pos(i) + p_V(i).
+!!    Variant for cases with no required communication.
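+!!
+!!    As a minimal sketch of the interpolation performed below (assuming a
+!!    periodic domain with N mesh points along the current direction): for a
+!!    particle at dimensionless position x, with pos = floor(x) and w = x - pos,
+!!      V(x) = (1-w)*V_comp(pos) + w*V_comp(pos+1)
+!!    where both indices are wrapped into [1;N] via modulo(pos-1,N)+1.
+!!    For instance, N = 8 and x = 8.25 use points 8 and 1 with w = 0.25.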
+subroutine AC_interpol_lin_no_com(direction, gs, V_comp, p_V)
+
+    ! This code involves a copy of p_V. It is possible to use the 3D velocity field directly, but it would also limit the memory access.
+
+    use cart_topology   ! info about mesh and mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    integer, intent(in)                             :: direction    ! current direction
+    integer, dimension(2),intent(in)                :: gs           ! group size
+    real(WP), dimension(:,:,:), intent(in)          :: V_comp
+    real(WP), dimension(:,:,:), intent(inout)       :: p_V
+    ! Others, local
+    integer                                             :: ind          ! index
+    integer                                             :: i1, i2       ! indices in the lines group
+    integer                                             :: pos          ! index of the mesh point which precedes the particle position
+
+
+    ! ===== Compute the interpolated velocity =====
+    ! -- Compute the interpolation weight and update the velocity directly in p_V --
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            do ind = 1, mesh_sc%N(direction)
+
+            pos = floor(p_V(ind,i1,i2))
+            p_V(ind,i1,i2) = V_comp(modulo(pos-1,mesh_sc%N(direction))+1,i1,i2) + (p_V(ind,i1,i2)-pos)* &
+                & (V_comp(modulo(pos,mesh_sc%N(direction))+1,i1,i2)-V_comp(modulo(pos-1,mesh_sc%N(direction))+1,i1,i2))
+
+            end do ! loop on particle index (ind)
+        end do ! loop on first coordinate (i1) of a line inside the block of line
+    end do ! loop on second coordinate (i2) of a line inside the block of line
+
+end subroutine AC_interpol_lin_no_com
+
+
+!> Interpolate the velocity field from a coarse grid at particle positions -
+!! version for a group of (more than one) lines
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        gs          = size of a group (ie number of lines it gathers along the two other directions)
+!!    @param[in]        ind_group   = indices of the current work item
+!!    @param[in]        id1         = first coordinate of the current work item relative to the total local mesh
+!!    @param[in]        id2         = second coordinate of the current work item relative to the total local mesh
+!!    @param[in]        V_coarse    = velocity to interpolate
+!!    @param[in,out]    p_V         = particle positions on input and particle velocities (along the current direction) on output
+!! @details
+!!    An RK2 scheme is used to advect the particles: the middle point scheme. An
+!!    intermediate position "p_pos_bis(i) = p_pos(i) + V(i)*dt/2" is computed and then
+!!    the numerical velocity of each particle is computed as the interpolation of V at
+!!    this point. This field is used to advect the particles at second order in time:
+!!    p_pos(t+dt, i) = p_pos(i) + p_V(i).
+!!    The group line indices are used to ensure the uniqueness of each mpi message tag.
+!!    The interpolation is done for a group of lines, which allows communications
+!!    to be pooled. Considering a group of Na x Nb lines, the communications performed
+!!    by this algorithm are around (Na x Nb) times bigger than with the algorithm which
+!!    works on a single line, but also around (Na x Nb) times less frequent.
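+!!
+!!    Interpolation sketch (using the stencil data provided by the
+!!    interpolation_velo module): for a particle at dimensionless position x,
+!!    pos = floor(x) - stencil_g is the first stencil point and
+!!      p_V = sum( weight(k) * V(pos+k-1), k = 1..stencil_size )
+!!    with weight = get_weight(x - floor(x)). The loops below only dispatch
+!!    each point V(pos+k-1) either to the local field V_coarse or to the
+!!    reception buffer V_buffer, depending on the subdomain it belongs to.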
+subroutine AC_interpol_plus(direction, gs, ind_group, id1, id2, V_coarse, p_V)
+
+    use mpi
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+    !use Interpolation_velo
+    use interpolation_velo, only : get_weight, stencil_g, stencil_d, stencil_size
+
+    implicit none
+
+    ! Input/Output
+    integer                   , intent(in)          :: direction    ! current direction
+    integer, dimension(2)     , intent(in)          :: gs           ! group size
+    integer, dimension(2)     , intent(in)          :: ind_group
+    integer                   , intent(in)          :: id1, id2
+    real(WP), dimension(:,:,:), intent(inout)       :: p_V
+    real(WP), dimension(:,:,:), intent(in)          :: V_coarse     ! velocity on coarse grid
+    ! Local
+    integer                                     :: idir1, idir2 ! = (id1, id2) - 1 as array indices start from 1.
+    real(WP), dimension(stencil_size)           :: weight       ! interpolation weight storage
+    real(WP), dimension(:), allocatable         :: V_buffer     ! velocity buffer for positions outside of the local subdomain
+    integer, dimension(:), allocatable          :: pos_in_buffer! current position inside the reception buffer, per sending process
+    integer , dimension(gs(1), gs(2))           :: rece_ind_min ! minimal index of the mesh points involved in the interpolation (for my local subdomain)
+    integer , dimension(gs(1), gs(2))           :: rece_ind_max ! maximal index of the mesh points involved in the interpolation (for my local subdomain)
+    integer                                     :: ind, ind_com, V_ind ! indices
+    integer                                     :: i_limit, i, ind_gap
+    integer                                     :: i1, i2       ! indices in the lines group
+    integer                                     :: pos, pos_old ! index of the mesh point which precedes the particle position
+    integer                                     :: proc_gap, gap! distance between my (mpi) coordinate and the coordinate of the
+                                                                ! process associated with a given position
+    integer                                     :: proc_end     ! last global index of the process associated with the current pos
+    logical, dimension(3)                       :: myself
+    integer, dimension(:), allocatable          :: send_carto   ! cartography of what I have to send
+    integer                                     :: ind_1Dtable  ! index of my current position inside a one-dimensional table
+    integer                                     :: ind_for_i1   ! where to read the first coordinate (i1) of the current line inside the cartography
+    real(WP), dimension(:), allocatable         :: send_buffer  ! to store what I have to send (in a contiguous way)
+    integer, dimension(gs(1),gs(2),2)           :: rece_gap     ! distance between me and the processes which send me information
+    integer, dimension(2 , 2)                   :: send_gap     ! distance between me and the processes to which I send information
+    integer, dimension(2)                       :: rece_gap_abs ! min (resp max) value of rece_gap(:,:,i) with i=1 (resp 2)
+    integer                                     :: com_size     ! size of message send/receive
+    integer, dimension(:), allocatable          :: size_com     ! size of message send/receive
+    integer                                     :: min_size     ! minimal size of cartography(:,proc_gap)
+    integer                                     :: max_size     ! maximal size of cartography(:,proc_gap)
+    integer                                     :: tag          ! mpi message tag
+    integer, dimension(:), allocatable          :: tag_proc     ! mpi message tag
+    integer                                     :: ierr         ! mpi error code
+    integer, dimension(:), allocatable          :: s_request_bis! mpi communication request (handle) of nonblocking send
+    integer, dimension(:), allocatable          :: rece_request ! mpi communication request (handle) of nonblocking receive
+    integer, dimension(MPI_STATUS_SIZE)         :: rece_status  ! mpi status (for mpi_wait)
+    integer, dimension(:,:), allocatable        :: cartography  ! cartography(proc_gap) contains the set of line indices in the block for which the
+                                                                ! current process requires data from proc_gap and, for each of these lines, the range
+                                                                ! of mesh points from which it requires the velocity values.
+
+    ! -- Initialisation --
+    idir1 = id1 - 1
+    idir2 = id2 - 1
+    ! Compute the range of the set of points where I need the velocity values
+    rece_ind_min = floor(p_V(1,:,:)) - stencil_g
+    rece_ind_max = floor(p_V(mesh_sc%N_proc(direction),:,:)) + stencil_d
+
+    ! ===== Exchange velocity field if needed =====
+    ! It uses non-blocking messages to do the computations during the communication process
+    ! -- What do I have to communicate? --
+    rece_gap(:,:,1) = floor(real(rece_ind_min-1)/mesh_V%N_proc(direction))
+    rece_gap(:,:,2) = floor(real(rece_ind_max-1)/mesh_V%N_proc(direction))
+    rece_gap_abs(1) = minval(rece_gap(:,:,1))
+    rece_gap_abs(2) = maxval(rece_gap(:,:,2))
+    max_size = 2 + gs(2)*(2+3*gs(1))
+    allocate(cartography(max_size,rece_gap_abs(1):rece_gap_abs(2)))
+    call AC_interpol_determine_communication(direction, ind_group, gs, send_gap,  &
+    & rece_gap, rece_gap_abs, cartography)
+
+    ! -- Send messages about what I want --
+    allocate(s_request_bis(rece_gap_abs(1):rece_gap_abs(2)))
+    allocate(size_com(rece_gap_abs(1):rece_gap_abs(2)))
+    allocate(tag_proc(rece_gap_abs(1):rece_gap_abs(2)))
+    min_size = 2 + gs(2)
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+        if (neighbors(direction,proc_gap) /= D_rank(direction)) then
+            cartography(1,proc_gap) = 0
+            ! Use the cartography to know which lines are concerned
+            size_com(proc_gap) = cartography(2,proc_gap)
+            ! Range I want - store into the cartography
+            gap = proc_gap*mesh_V%N_proc(direction)
+            ! Position in cartography(:,proc_gap) of the current i1 index
+            ind_for_i1 = min_size
+            do i2 = 1, gs(2)
+                do ind = ind_for_i1+1, ind_for_i1 + cartography(2+i2,proc_gap), 2
+                    do i1 = cartography(ind,proc_gap), cartography(ind+1,proc_gap)
+                        ! The interval starts from:
+                        cartography(size_com(proc_gap)+1,proc_gap) = max(rece_ind_min(i1,i2), gap+1) ! +1 as Fortran indices start from 1
+                        ! and ends at:
+                        cartography(size_com(proc_gap)+2,proc_gap) = min(rece_ind_max(i1,i2), gap+mesh_V%N_proc(direction))
+                        ! update number of element to receive
+                        cartography(1,proc_gap) = cartography(1,proc_gap) &
+                                    & + cartography(size_com(proc_gap)+2,proc_gap) &
+                                    & - cartography(size_com(proc_gap)+1,proc_gap) + 1
+                        size_com(proc_gap) = size_com(proc_gap)+2
+                    end do
+                end do
+                ind_for_i1 = ind_for_i1 + cartography(2+i2,proc_gap)
+            end do
+            ! Tag = concatenation of (rank+1), ind_group(1), ind_group(2), direction and a unique id.
+            tag_proc(proc_gap) = compute_tag(ind_group, tag_velo_range, direction, proc_gap)
+            ! Send message
+#ifdef PART_DEBUG
+            if(size_com(proc_gap)>max_size) then
+                print*, 'rank = ', cart_rank, ' -- wrong size of the cartography to send'
+                print*, 'carto size = ', size_com(proc_gap), ' is larger than the theoretical size ', &
+                    & max_size, ' and carto = ', cartography(:,proc_gap)
+            end if
+#endif
+            call mpi_ISsend(cartography(1,proc_gap), size_com(proc_gap), MPI_INTEGER,   &
+                & neighbors(direction,proc_gap), tag_proc(proc_gap), D_comm(direction), &
+                & s_request_bis(proc_gap),ierr)
+        end if
+    end do
+
+
+    ! -- Non blocking reception of the velocity field --
+    ! Allocate pos_in_buffer to compute the size of V_buffer and thus be able
+    ! to allocate it.
+    allocate(pos_in_buffer(rece_gap_abs(1):rece_gap_abs(2)))
+    pos_in_buffer(rece_gap_abs(1)) = 1
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)-1
+        pos_in_buffer(proc_gap+1)= pos_in_buffer(proc_gap) + cartography(1,proc_gap)
+    end do
+    allocate(V_buffer(pos_in_buffer(rece_gap_abs(2)) &
+                & + cartography(1,rece_gap_abs(2))))
+    V_buffer = 0
+    allocate(rece_request(rece_gap_abs(1):rece_gap_abs(2)))
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+        if (neighbors(direction,proc_gap) /= D_rank(direction)) then
+            ! IIa - Compute reception tag
+            tag = compute_tag(ind_group, tag_velo_V, direction, -proc_gap)
+            ! IIb - Receive message
+            call mpi_Irecv(V_buffer(pos_in_buffer(proc_gap)), cartography(1,proc_gap), MPI_DOUBLE_PRECISION, &
+                    & neighbors(direction,proc_gap), tag, D_comm(direction), rece_request(proc_gap), ierr)
+        end if
+    end do
+
+    ! -- Send the velocity field to the processes which need it --
+    allocate(send_carto(max_size))
+    do proc_gap = send_gap(1,1), send_gap(1,2)
+        if (neighbors(direction,proc_gap) /= D_rank(direction)) then
+            ! I - Receive messages about what I have to send
+            ! Ia - Compute reception tag = concatenation of (rank+1), ind_group(1), ind_group(2), direction and a unique id.
+            tag = compute_tag(ind_group, tag_velo_range, direction, -proc_gap)
+            ! Ib - Receive the message
+            call mpi_recv(send_carto(1), max_size, MPI_INTEGER, neighbors(direction,proc_gap), &
+              & tag, D_comm(direction), rece_status, ierr)
+            ! II - Send it
+            ! IIa - Create send buffer
+            allocate(send_buffer(send_carto(1)))
+            gap = proc_gap*mesh_V%N_proc(direction)
+            com_size = 0
+            ind_1Dtable = send_carto(2)
+            ! Position in cartography(:,proc_gap) of the current i1 index
+            ind_for_i1 = min_size
+            do i2 = 1, gs(2)
+                do ind = ind_for_i1+1, ind_for_i1 + send_carto(2+i2), 2
+                    do i1 = send_carto(ind), send_carto(ind+1)
+                        do ind_com = send_carto(ind_1Dtable+1)+gap, send_carto(ind_1Dtable+2)+gap ! index inside the current line
+                            com_size = com_size + 1
+                            send_buffer(com_size) = V_coarse(ind_com, i1+idir1,i2+idir2)
+                        end do
+                        ind_1Dtable = ind_1Dtable + 2
+                    end do
+                end do
+                ind_for_i1 = ind_for_i1 + send_carto(2+i2)
+            end do
+            ! IIa_bis - check correctness
+#ifdef PART_DEBUG
+            if(com_size/=send_carto(1)) then
+                print*, 'rank = ', cart_rank, ' -- wrong size of the velocity field to send'
+                print*, 'buffer size = ', com_size, ' does not match the received size ', &
+                    & send_carto(1), ' and carto = ', send_carto(:)
+            end if
+#endif
+            ! IIb - Compute send tag
+            tag = compute_tag(ind_group, tag_velo_V, direction, proc_gap)
+            ! IIc - Send message
+            call mpi_Send(send_buffer(1), com_size, MPI_DOUBLE_PRECISION,  &
+                    & neighbors(direction,proc_gap), tag, D_comm(direction),&
+                    & ierr)
+            deallocate(send_buffer)
+        end if
+    end do
+    deallocate(send_carto)
+
+    !-- Free some ISsend buffers and some arrays --
+! XXX Todo: prefer a call to MPI_WAITALL, combined with initializing
+! s_request_bis to MPI_REQUEST_NULL, and remove both the loop AND the if.
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+        if (neighbors(direction,proc_gap) /= D_rank(direction)) then
+            call MPI_WAIT(s_request_bis(proc_gap),rece_status,ierr)
+        end if
+    end do
+    deallocate(s_request_bis)
+    deallocate(cartography) ! We do not need it anymore
+    deallocate(tag_proc)
+    deallocate(size_com)
+
+    ! Check if communications are done before starting the interpolation
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+        if (neighbors(direction,proc_gap)/=D_rank(direction)) then
+            call mpi_wait(rece_request(proc_gap), rece_status, ierr)
+        end if
+    end do
+    deallocate(rece_request)
+
+
+
+  ! ===== Compute the interpolated velocity =====
+  pos_in_buffer = pos_in_buffer - 1
+  do i2 = 1, gs(2)
+    do i1 = 1, gs(1)
+      ind = 1
+      pos = floor(p_V(ind,i1,i2))-stencil_g
+      pos_old = pos-1
+      proc_gap = floor(dble(pos-1)/mesh_V%N_proc(direction))
+      myself(1) =(D_rank(direction)== neighbors(direction,proc_gap))
+      myself(2) = (D_rank(direction) == neighbors(direction,proc_gap+1))
+      ind_gap = proc_gap*mesh_V%N_proc(direction)
+      proc_end=(proc_gap+1)*mesh_V%N_proc(direction)
+      do while (ind <= mesh_sc%N_proc(direction))
+        if (myself(1)) then
+          ! Case 1: If all stencil points belong to the local subdomain associated with the current MPI process:
+          do while((pos+stencil_size-1<=proc_end).and.(ind<=mesh_sc%N_proc(direction)))
+            weight=get_weight(p_V(ind,i1,i2)-(pos+stencil_g))
+            V_ind = pos - ind_gap
+            p_V(ind,i1,i2) = sum(weight*V_coarse(V_ind:V_ind+stencil_size-1,i1+idir1,i2+idir2))
+            ! Update for next particle:
+            ind = ind + 1
+            pos_old = pos
+            pos = floor(p_V(ind,i1,i2))-stencil_g
+          end do ! case 1: while((pos+stencil_size-1<=proc_end).and.(ind<=mesh_sc%N_proc(direction)))
+          ! Case 2: Else if the stencil intersects two local subdomains
+          do while((pos<=proc_end).and.(ind <= mesh_sc%N_proc(direction)))
+            weight=get_weight(p_V(ind,i1,i2)-(pos+stencil_g))
+            V_ind = pos - ind_gap
+            i_limit = mesh_V%N_proc(direction) - V_ind + 1
+            p_V(ind,i1, i2) = weight(1)*V_coarse(V_ind,i1+idir1,i2+idir2)
+            do i = 2, i_limit
+              p_V(ind,i1,i2) = p_V(ind,i1,i2) + weight(i)*V_coarse(i+V_ind-1,i1+idir1,i2+idir2)
+            end do
+            if(myself(2)) then
+              do i = i_limit+1, stencil_size
+                p_V(ind,i1,i2) = p_V(ind,i1,i2) + weight(i)*V_coarse(i-i_limit,i1+idir1,i2+idir2)
+              end do
+            else ! not(myself(2))
+              ! Start to read the buffer at (pos_in_buffer(proc_gap+1)+1) and do
+              ! not update pos_in_buffer until pos changes of subdomain.
+              do i = i_limit+1, stencil_size
+                p_V(ind,i1,i2) = p_V(ind,i1,i2) + weight(i)*V_buffer(pos_in_buffer(proc_gap+1)+i-i_limit)
+              end do
+            end if
+            ! If not (stencil_size < N_proc(direction)+1):
+            !   compute i_limit2 = min(stencil_size, proc_end + N_proc - pos)
+            !   stop the previous loop at i_limit2
+            !   add a loop from i_limit2+1 to stencil_size
+            !   in this loop, use proc_gap+2 and myself(3)
+            ! And so on ...
+            ! Update for next particle:
+            ind = ind + 1
+            pos_old = pos
+            pos = floor(p_V(ind,i1,i2))-stencil_g
+          end do ! case 2: ((pos<=proc_end).and.(ind <= mesh_sc%N_proc(direction)))
+        else ! not(myself(1))
+          ! Case 1: If all stencil points belong to the local subdomain associated with the current MPI process:
+          do while((pos+stencil_size-1<=proc_end).and.(ind<=mesh_sc%N_proc(direction)))
+            weight=get_weight(p_V(ind,i1,i2)-(pos+stencil_g))
+            pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + pos - pos_old
+            p_V(ind,i1,i2) = sum(weight*V_buffer(pos_in_buffer(proc_gap):pos_in_buffer(proc_gap)+stencil_size-1))
+            ! Update for next particle:
+            ind = ind + 1
+            pos_old = pos
+            pos = floor(p_V(ind,i1,i2))-stencil_g
+          end do ! case 1: while((pos+stencil_size-1<=proc_end).and.(ind<=mesh_sc%N_proc(direction)))
+          ! Case 2: Else if the stencil intersects two local subdomains
+          do while((pos<=proc_end).and.(ind <= mesh_sc%N_proc(direction)))
+            weight=get_weight(p_V(ind,i1,i2)-(pos+stencil_g))
+            i_limit = mesh_V%N_proc(direction) - (pos-ind_gap) + 1
+            pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + pos - pos_old
+            p_V(ind,i1,i2) = weight(1)*V_buffer(pos_in_buffer(proc_gap))
+            do i = 2, i_limit
+              p_V(ind,i1,i2) = p_V(ind,i1,i2) + weight(i)*V_buffer(pos_in_buffer(proc_gap)+i-1)
+            end do
+            if(myself(2)) then
+              do i = i_limit+1, stencil_size
+                p_V(ind,i1,i2) = p_V(ind,i1,i2) + weight(i)*V_coarse(i-i_limit,i1+idir1,i2+idir2)
+              end do
+            else ! not(myself(2))
+              do i = i_limit+1, stencil_size
+                p_V(ind,i1,i2) = p_V(ind,i1,i2) + weight(i)*V_buffer(pos_in_buffer(proc_gap+1)+i-i_limit)
+              end do
+            end if
+            ! Update for next particle:
+            ind = ind + 1
+            pos_old = pos
+            pos = floor(p_V(ind,i1,i2))-stencil_g
+          end do ! case 2: ((pos<=proc_end).and.(ind <= mesh_sc%N_proc(direction)))
+        end if
+        ! Cases 3 and 4 can be gathered, whether myself is true or not.
+        ! Case 3: pos belongs to the next subdomain
+        if(ind<=mesh_sc%N_proc(direction)) then ! Change of process
+          proc_gap = proc_gap+1
+          myself(1) = myself(2)
+          ind_gap = proc_end
+          proc_end = proc_end + mesh_V%N_proc(direction)
+          myself(2) = (D_rank(direction) ==neighbors(direction, proc_gap+1))
+        else
+        ! Case 4: End of the line. Update pos_in_buffer for the next line.
+          ! pos_in_buffer must be updated to the maximal index already used.
+          if (pos_old+stencil_size<=proc_end) then
+            if (.not.(myself(1))) pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + stencil_size - 1
+          else
+            i_limit = mesh_V%N_proc(direction) - (pos_old-ind_gap) + 1
+            if (.not.(myself(1))) pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + i_limit - 1
+            if (.not.(myself(2))) pos_in_buffer(proc_gap+1) = pos_in_buffer(proc_gap+1) + (stencil_size - i_limit)
+          end if
+        end if ! if case 3
+      end do  ! while (ind<mesh_sc%N_proc)
+    end do    ! i1 = 1, gs(1)
+  end do      ! i2 = 1, gs(2)
+
+  deallocate(pos_in_buffer)   ! We do not need it anymore
+
+  ! ===== Free memory =====
+  ! -- Deallocate dynamic array --
+  deallocate(V_buffer)
+
+end subroutine AC_interpol_plus
+
+
+!> Interpolate the velocity field used in an RK2 scheme for particle advection -
+!! version for directions with no domain subdivision and thus no required
+!! communication. Works with any interpolation formula.
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        gs          = size of a group (ie number of lines it gathers along the two other directions)
+!!    @param[in]        id1, id2    = coordinates of the current work item relative to the total local mesh
+!!    @param[in]        V_coarse    = velocity to interpolate
+!!    @param[in,out]    p_V         = particle positions on input and particle velocities (along the current direction) on output
+!! @details
+!!    An RK2 scheme is used to advect the particles: the middle point scheme. An
+!!    intermediate position "p_pos_bis(i) = p_pos(i) + V(i)*dt/2" is computed and then
+!!    the numerical velocity of each particle is computed as the interpolation of V at
+!!    this point. This field is used to advect the particles at second order in time:
+!!    p_pos(t+dt, i) = p_pos(i) + p_V(i).
+!!    Variant for cases with no required communication.
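+!!
+!!    The periodic wrapping below relies on Fortran's modulo, which always
+!!    returns a non-negative remainder: a global point of index g is read at
+!!    local index modulo(g-1, N)+1. For instance, with N = 8, g = 0 maps to
+!!    index 8 and g = 9 maps to index 1.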
+subroutine AC_interpol_plus_no_com(direction, gs, id1, id2, V_coarse, p_V)
+
+    ! This code involves a copy of p_V. It is possible to use the 3D velocity field directly, but it would also limit the memory access.
+
+    use cart_topology   ! info about mesh and mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+    use interpolation_velo, only : get_weight, stencil_g, stencil_size
+
+    ! Input/Output
+    integer, intent(in)                         :: direction    ! current direction
+    integer, dimension(2),intent(in)            :: gs           ! group size
+    integer                   , intent(in)      :: id1, id2
+    real(WP), dimension(:,:,:), intent(in)      :: V_coarse
+    real(WP), dimension(:,:,:), intent(inout)   :: p_V
+    ! Others, local
+    integer                                     :: idir1, idir2 ! = (id1, id2) - 1 as array indices start from 1.
+    real(WP), dimension(stencil_size)           :: weight       ! interpolation weight storage
+    integer                                     :: ind, i_st    ! indices
+    integer                                     :: i1, i2       ! indices in the lines group
+    integer                                     :: pos          ! index of the mesh point which precedes the particle position
+
+    idir1 = id1 - 1
+    idir2 = id2 - 1
+
+    ! ===== Compute the interpolated velocity =====
+    ! -- Compute the interpolation weight and update the velocity directly in p_V --
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            do ind = 1, mesh_sc%N(direction)
+              pos = floor(p_V(ind,i1,i2))
+              weight=get_weight(p_V(ind,i1,i2)-pos)
+              pos = pos - stencil_g - 1
+              p_V(ind,i1,i2) = weight(1)*V_coarse(modulo(pos,mesh_V%N(direction))+1,i1+idir1,i2+idir2)
+              do i_st = 2, stencil_size
+                p_V(ind,i1,i2) = p_V(ind,i1,i2) + &
+                    & weight(i_st)*V_coarse(modulo(pos+i_st-1,mesh_V%N(direction))+1,i1+idir1,i2+idir2)
+              end do ! loop on stencil points.
+            end do ! loop on particle index (ind)
+        end do ! loop on first coordinate (i1) of a line inside the block of line
+    end do ! loop on second coordinate (i2) of a line inside the block of line
+
+end subroutine AC_interpol_plus_no_com
+
+
+end module advec_common_interpol
+!> @}
diff --git a/HySoP/src/scalesInterface/particles/advec_common_remesh.F90 b/HySoP/src/scalesInterface/particles/advec_common_remesh.F90
new file mode 100644
index 000000000..371284faf
--- /dev/null
+++ b/HySoP/src/scalesInterface/particles/advec_common_remesh.F90
@@ -0,0 +1,1551 @@
+!USEFORTEST advec
+!> @addtogroup part
+!! @{
+!------------------------------------------------------------------------------
+!
+! MODULE: advec_common_remesh
+!
+!
+! DESCRIPTION:
+!> The module ``advec_common_remesh'' gathers functions and subroutines used to advect
+!! a scalar which are not specific to a direction
+!! @details
+!! This module gathers functions and routines used to advect a scalar which are not
+!! specific to a direction. This is a parallel implementation using MPI and
+!! the cartesian topology it provides. It also contains the variables common to
+!! the solver along each direction and other generic variables used for the
+!! advection based on the particle method.
+!!
+!! Except for testing purposes, this module is not supposed to be used by the
+!! main code but only by the other advection modules. More precisely, a final user
+!! must only use the generic "advec" module which contains all the interfaces to
+!! solve the advection equation with the particle method, and to choose the
+!! remeshing formula, the dimensional splitting and everything else.
+!!
+!! The module "test_advec" can be used in order to validate the procedures
+!! embedded in this module.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advec_common_remesh
+
+    use precision_tools
+    use advec_abstract_proc
+
+    implicit none
+
+
+    ! Information about the particles and their blocks
+    public
+
+
+    ! ===== Public procedures =====
+    !----- Init remeshing context -----
+    public  :: AC_setup_init
+    public  :: AC_remesh_setup_alongX
+    public  :: AC_remesh_setup_alongY
+    public  :: AC_remesh_setup_alongZ
+    !----- To remesh particles -----
+    public                        :: AC_remesh_lambda_group
+    public                        :: AC_remesh_Mprime_group
+    !----- Tools to  remesh particles -----
+    public                        :: AC_remesh_range
+    public                        :: AC_remesh_determine_communication
+    public                        :: AC_remesh_cartography
+
+    ! ===== Private procedures =====
+    !----- Prepare and perform communication required during remeshing -----
+    private :: AC_remesh_init
+    private :: AC_remesh_finalize
+
+    ! ===== Public variables =====
+
+    ! ===== Private variables =====
+    !> Pointer to the subroutine which remeshes particles to a buffer - for formulas of the lambda family (with tag/type).
+    procedure(remesh_in_buffer_type), pointer, private      :: remesh_in_buffer_lambda_pt => null()
+    !> Pointer to the subroutine which remeshes particles to a buffer - for limited formulas of the lambda family (with tag/type).
+    procedure(remesh_in_buffer_limit), pointer, private     :: remesh_in_buffer_limit_lambda_pt => null()
+    !> Pointer to the subroutine which remeshes particles to a buffer - for formulas of the M' family (without tag/type).
+    procedure(remesh_in_buffer_notype), pointer, private    :: remesh_in_buffer_Mprime_pt => null()
+    !> Pointer to the subroutine which redistributes a buffer (containing remeshed
+    !! particles) inside the original scalar field.
+    procedure(remesh_buffer_to_scalar), pointer, private    :: remesh_buffer_to_scalar_pt => null()
+    !> Pointer to subroutine which compute scalar slope along the current
+    !! direction and then computes the limitator function (divided by 8)
+    procedure(advec_limitator_group), pointer, private      :: advec_limitator            => null()
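+
+    ! Note: these pointers carry the genericity over directions: the
+    ! AC_remesh_setup_along{X,Y,Z} routines below bind them to the
+    ! direction-specific routines of advecX/advecY/advecZ before the
+    ! remeshing itself is performed.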
+
+
+contains
+
+! ===== Public procedure =====
+
+! ================================================================================ !
+! =============     To deal with remeshing setup and genericity      ============= !
+! ================================================================================ !
+
+!> Init remesh_line_pt for the right remeshing formula
+subroutine AC_setup_init()
+
+    use advec_remeshing_lambda
+    use advec_remeshing_Mprime
+
+    call AC_remesh_init_lambda()
+    call AC_remesh_init_Mprime()
+
+end subroutine AC_setup_init
+
+!> Setup remesh_in_buffer and remesh_in_buffer_to_scalar for remeshing along X
+subroutine AC_remesh_setup_alongX()
+    use advecX
+
+    remesh_in_buffer_lambda_pt      => advecX_remesh_in_buffer_lambda
+    remesh_in_buffer_limit_lambda_pt=> advecX_remesh_in_buffer_limit_lambda
+    remesh_in_buffer_Mprime_pt      => advecX_remesh_in_buffer_Mprime
+
+    remesh_buffer_to_scalar_pt      => advecX_remesh_buffer_to_scalar
+
+    advec_limitator                 => advecX_limitator_group
+
+end subroutine AC_remesh_setup_alongX
+
+!> Setup remesh_in_buffer and remesh_in_buffer_to_scalar for remeshing along Y
+subroutine AC_remesh_setup_alongY()
+    use advecY
+
+    remesh_in_buffer_lambda_pt      => advecY_remesh_in_buffer_lambda
+    remesh_in_buffer_limit_lambda_pt=> advecY_remesh_in_buffer_limit_lambda
+    remesh_in_buffer_Mprime_pt      => advecY_remesh_in_buffer_Mprime
+    remesh_buffer_to_scalar_pt      => advecY_remesh_buffer_to_scalar
+
+    advec_limitator                 => advecY_limitator_group
+
+end subroutine AC_remesh_setup_alongY
+
+!> Setup remesh_in_buffer and remesh_in_buffer_to_scalar for remeshing along Z
+subroutine AC_remesh_setup_alongZ()
+    use advecZ
+
+    remesh_in_buffer_lambda_pt      => advecZ_remesh_in_buffer_lambda
+    remesh_in_buffer_limit_lambda_pt=> advecZ_remesh_in_buffer_limit_lambda
+    remesh_in_buffer_Mprime_pt      => advecZ_remesh_in_buffer_Mprime
+    remesh_buffer_to_scalar_pt      => advecZ_remesh_buffer_to_scalar
+
+    advec_limitator                 => advecZ_limitator_group
+
+end subroutine AC_remesh_setup_alongZ
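+
+! A possible usage sketch (hypothetical driver code, for illustration only):
+! the remeshing context must be initialized once, and the direction selected
+! before remeshing a group of lines along it, e.g. along X (direction = 1):
+!     call AC_setup_init()
+!     call AC_remesh_setup_alongX()
+!     call AC_remesh_lambda_group(1, ind_group, gs, p_pos_adim, p_V, j, k, scal, dt)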
+
+
+! ==============================================================================================
+! ====================     Remesh all the particles of a group of lines     ====================
+! ==============================================================================================
+
+
+!> remeshing with an order 2 or 4 lambda method, corrected to allow large CFL numbers - group version
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        gs          = size of groups (along X direction)
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        p_V         = particle velocities (needed for tag and type)
+!!    @param[in]        j,k         = indices of the current line (x-coordinate and z-coordinate)
+!!    @param[in,out]    scal        = scalar field to advect
+!!    @param[in]        dt          = time step (needed for tag and type)
+subroutine AC_remesh_lambda_group(direction, ind_group, gs, p_pos_adim, p_V, j, k, scal, dt)
+
+    use advec_variables         ! contains info about solver parameters and others.
+    use cart_topology     ! Description of mesh and of mpi topology
+    use advec_correction        ! To compute type and tag
+
+    ! Input/Output
+    integer, intent(in)                         :: direction
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: j, k
+    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! dimensionless particle positions
+    real(WP), dimension(:,:,:), intent(in)      :: p_V          ! particle velocities
+    real(WP), dimension(:,:,:), intent(inout)   :: scal
+    real(WP), intent(in)                        :: dt
+    ! Other local variables
+    ! To type and tag particles
+    logical, dimension(bl_nb(direction)+1,gs(1),gs(2))  :: bl_type      ! is the particle block a center block or a left one?
+    logical, dimension(bl_nb(direction),gs(1),gs(2))    :: bl_tag       ! indices of tagged particles
+    ! Others
+    integer, dimension(gs(1), gs(2))        :: send_group_min     ! minimal mesh index involved in remeshing particles
+    integer, dimension(gs(1), gs(2))        :: send_group_max     ! maximal mesh index involved in remeshing particles
+    integer, dimension(gs(1),gs(2),2)       :: send_gap     ! distance between me and the processes to which I send information
+    integer, dimension(2)                   :: send_gap_abs ! min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+    integer, dimension(2 , 2)               :: rece_gap     ! distance between me and the processes which send me information
+    integer, dimension(:,:), allocatable    :: cartography  ! cartography(proc_gap) contains the set of line indices in the block to which the
+                                                            ! current process will send data during remeshing and, for each of these lines,
+                                                            ! the range of mesh points concerned
+    ! Variable use to manage mpi communications
+    integer                                 :: max_size     ! maximal size of cartography(:,proc_gap)
+
+    ! ===== Pre-Remeshing: Determine blocks type and tag particles =====
+    call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
+
+    ! ===== Compute range of remeshing data =====
+    call AC_remesh_range(bl_type, p_pos_adim, direction, send_group_min, send_group_max, send_gap, send_gap_abs)
+
+    ! ===== Determine the communication needed: who will communicate with whom? (ie compute senders and receivers) =====
+    ! -- Allocation --
+    max_size = 2 + gs(2)*(2+3*gs(1))
+    allocate(cartography(max_size,send_gap_abs(1):send_gap_abs(2)))
+    ! -- Determine which processes communicate together --
+    call AC_remesh_determine_communication(direction, gs, ind_group, send_group_min, send_group_max, &
+        & rece_gap, send_gap, send_gap_abs, cartography)
+
+    ! ===== Proceed to remeshing via a local buffer =====
+    call AC_remesh_via_buffer_lambda(direction, ind_group, gs, p_pos_adim, j, k,&
+        & scal, send_group_min, send_group_max, send_gap_abs, rece_gap,         &
+        & cartography, bl_type, bl_tag)
+
+    ! -- Free all communication buffer and data --
+    deallocate(cartography)
+
+end subroutine AC_remesh_lambda_group
+
+
+!> remeshing with an order 2 limited lambda method, corrected to allow large CFL numbers - group version
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        gs          = size of groups (along X direction)
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        p_V         = particle velocities (needed for tag and type)
+!!    @param[in]        j,k         = indices of the current line (x-coordinate and z-coordinate)
+!!    @param[in,out]    scal        = scalar field to advect
+!!    @param[in]        dt          = time step (needed for tag and type)
+subroutine AC_remesh_limit_lambda_group(direction, ind_group, gs, p_pos_adim, p_V, j, k, scal, dt)
+
+    use advec_variables         ! contains info about solver parameters and others.
+    use cart_topology     ! Description of mesh and of mpi topology
+    use advec_correction        ! To compute type and tag
+
+    ! Input/Output
+    integer, intent(in)                         :: direction
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: j, k
+    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! dimensionless particle positions
+    real(WP), dimension(:,:,:), intent(in)      :: p_V          ! particle velocities
+    real(WP), dimension(:,:,:), intent(inout)   :: scal
+    real(WP), intent(in)                        :: dt
+    ! Other local variables
+    ! To type and tag particles
+    logical, dimension(bl_nb(direction)+1,gs(1),gs(2))  :: bl_type      ! is the particle block a center block or a left one?
+    logical, dimension(bl_nb(direction),gs(1),gs(2))    :: bl_tag       ! indices of tagged particles
+    real(WP), dimension(mesh_sc%N_proc(direction)+1,gs(1),gs(2)):: limit        ! limitator function (divided by 8.)
+    ! Others
+    integer, dimension(gs(1), gs(2))        :: send_group_min     ! minimal mesh index involved in remeshing particles
+    integer, dimension(gs(1), gs(2))        :: send_group_max     ! maximal mesh index involved in remeshing particles
+    integer, dimension(gs(1),gs(2),2)       :: send_gap     ! distance between me and the processes to which I send information
+    integer, dimension(2)                   :: send_gap_abs ! min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+    integer, dimension(2 , 2)               :: rece_gap     ! distance between me and the processes which send me information
+    integer, dimension(:,:), allocatable    :: cartography  ! cartography(proc_gap) contains the set of line indices in the block to which the
+                                                            ! current process will send data during remeshing and, for each of these lines,
+                                                            ! the range of mesh points concerned
+    ! Variable use to manage mpi communications
+    integer                                 :: max_size     ! maximal size of cartography(:,proc_gap)
+
+    ! ===== Pre-Remeshing I: Determine blocks type and tag particles =====
+    call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
+
+    ! ===== Compute range of remeshing data =====
+    call AC_remesh_range(bl_type, p_pos_adim, direction, send_group_min, send_group_max, send_gap, send_gap_abs)
+
+    ! ===== Determine the communication needed: who will communicate with whom? (ie compute senders and receivers) =====
+    ! -- Allocation --
+    max_size = 2 + gs(2)*(2+3*gs(1))
+    allocate(cartography(max_size,send_gap_abs(1):send_gap_abs(2)))
+    ! -- Determine which processes communicate together --
+    call AC_remesh_determine_communication(direction, gs, ind_group, send_group_min, send_group_max, &
+        & rece_gap, send_gap, send_gap_abs, cartography)
+
+    ! ===== Pre-Remeshing II: Compute the limitator function =====
+    ! Actually, this subroutine computes [limitator/8], as it is this fraction
+    ! which always appears in the remeshing polynomials.
+    call advec_limitator(gs, ind_group, j, k, p_pos_adim, scal, limit)
+
+    ! ===== Proceed to remeshing via a local buffer =====
+    call AC_remesh_via_buffer_limit_lambda(direction, ind_group, gs, p_pos_adim,&
+        & j, k, scal, send_group_min, send_group_max, send_gap_abs, rece_gap,   &
+        & cartography, bl_type, bl_tag, limit)
+
+    ! -- Free all communication buffer and data --
+    deallocate(cartography)
+
+end subroutine AC_remesh_limit_lambda_group
+
+
+!> remeshing with a M'6 or M'8 remeshing formula - group version
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        gs          = size of groups (along X direction)
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        p_V         = particle velocities (needed for tag and type)
+!!    @param[in]        j,k         = indices of the current line (x-coordinate and z-coordinate)
+!!    @param[in,out]    scal        = scalar field to advect
+!!    @param[in]        dt          = time step (needed for tag and type)
+subroutine AC_remesh_Mprime_group(direction, ind_group, gs, p_pos_adim, p_V, j, k, scal, dt)
+
+    use advec_variables         ! contains info about solver parameters and others.
+    use cart_topology     ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    integer, intent(in)                         :: direction
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: j, k
+    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! dimensionless particle positions
+    real(WP), dimension(:,:,:), intent(in)      :: p_V          ! particle velocities
+    real(WP), dimension(:,:,:), intent(inout)   :: scal
+    real(WP), intent(in)                        :: dt
+    ! Other local variables
+    integer, dimension(gs(1), gs(2))        :: send_group_min     ! minimal mesh index involved in remeshing particles
+    integer, dimension(gs(1), gs(2))        :: send_group_max     ! maximal mesh index involved in remeshing particles
+    integer, dimension(gs(1),gs(2),2)       :: send_gap     ! distance between me and the processes to which I send information
+    integer, dimension(2)                   :: send_gap_abs ! min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+    integer, dimension(2 , 2)               :: rece_gap     ! distance between me and the processes which send me information
+    integer, dimension(:,:), allocatable    :: cartography  ! cartography(proc_gap) contains the set of line indices in the block to which the
+                                                            ! current process will send data during remeshing and, for each of these lines,
+                                                            ! the range of mesh points concerned
+    ! Variables used to manage mpi communications
+    integer                                 :: max_size     ! maximal size of cartography(:,proc_gap)
+
+    ! ===== Compute range of remeshing data =====
+    call AC_remesh_range_notype(p_pos_adim, direction, send_group_min, send_group_max, send_gap, send_gap_abs)
+
+    ! ===== Determine the communication needed: who will communicate with whom? (ie compute senders and receivers) =====
+    ! -- Allocation --
+    max_size = 2 + gs(2)*(2+3*gs(1))
+    allocate(cartography(max_size,send_gap_abs(1):send_gap_abs(2)))
+    ! -- Determine which processes communicate together --
+    call AC_remesh_determine_communication_com(direction, gs, ind_group, &
+        & rece_gap, send_gap, send_gap_abs, cartography)
+
+    ! ===== Proceed to remeshing via a local buffer =====
+    call AC_remesh_via_buffer_Mprime(direction, ind_group, gs, p_pos_adim,  &
+        &  j, k, scal, send_group_min, send_group_max, send_gap_abs,        &
+        &  rece_gap, cartography)
+
+    ! -- Free all communication buffer and data --
+    deallocate(cartography)
+
+end subroutine AC_remesh_Mprime_group
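+! For illustration, a typical call remeshing one group of lines along X
+! (direction = 1) looks like the following caller-side sketch (the array
+! shapes must match the group size gs):
+!     call AC_remesh_Mprime_group(1, ind_group, gs, p_pos_adim, p_V, j, k, scal, dt)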
+
+
+! ===================================================================================================
+! ===== Tools to remesh particles: variant of remeshing via buffer for each family of remeshing =====
+! ===================================================================================================
+
+
+!> Use the input information to update the scalar field: create particle
+!! weights (from the scalar values), set the scalar field to 0 and redistribute
+!! the particles - variant for the corrected lambda remeshing formula.
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        gs          = size of groups (along X direction)
+!!    @param[in]        j,k         = indices of the current line (x-coordinate and z-coordinate)
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        bl_type     = table of block types (center or left)
+!!    @param[in]        bl_tag      = information about tagged particles (bl_tag(ind_bl)=1 if the end of the bl_ind-th block
+!!                                    and the beginning of the following one are tagged)
+!!    @param[in,out]    scal        = scalar field to advect
+!!    @param[in]        send_min    = minimal index of the mesh points involved in remeshing particles
+!!    @param[in]        send_max    = maximal index of the mesh points involved in remeshing particles
+!!    @param[in]        send_gap_abs= send_gap_abs(i) is the min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+!!    @param[in]        rece_gap    = coordinate range of processes which will send me information during the remeshing.
+!!    @param[in,out]    cartography = cartography(:,proc_gap) contains the set of line indices in the group for which the
+!!                                    current process sends data to proc_gap and, for each of these lines, the range
+!!                                    of mesh points concerned.
+!! @details
+!!    This procedure manages all the needed communications. To minimize them,
+!! particles are remeshed inside a local buffer which is then sent to the
+!! process owning the right sub-domain, depending on the particle positions.
+!! No communication is needed to remesh inside the buffer. To avoid extra
+!! copies when creating the particle weights (which are weight = scalar), the
+!! scalar is redistributed directly inside the local buffer.
+!! This proceeds in two parts:
+!!    a - Remesh the particles: redistribute the scalar field inside a local
+!!        buffer and set scalar = 0.
+!!    b - Send the local buffer to its target process and update the scalar
+!!        field, ie scalar = scalar + received buffer.
+!! "remesh_in_buffer_pt" does part "a" and "remesh_buffer_to_scalar" part "b",
+!! except for the communications. The current subroutine manages all the
+!! communications (and the other bookkeeping needed for correctness).
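+!!
+!! As a schematic sketch of the call sequence below (not the exact argument
+!! lists):
+!!    call AC_remesh_init(...)              ! post cartography Isend/Irecv, size the buffer
+!!    allocate(send_buffer(...))            ! one sub-buffer per target process
+!!    call remesh_in_buffer_lambda_pt(...)  ! part "a": scalar -> local buffer
+!!    call mpi_waitall(...)                 ! all cartographies received
+!!    call AC_remesh_finalize(...)          ! part "b": exchange buffers and update scal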
+subroutine AC_remesh_via_buffer_lambda(direction, ind_group, gs, p_pos_adim,   &
+        & j, k, scal, send_min, send_max, send_gap_abs, rece_gap, cartography, &
+        & bl_type, bl_tag)
+
+    use advec_variables         ! contains info about solver parameters and others.
+    use advec_abstract_proc     ! contains some useful procedure pointers
+    use advecX                  ! procedures specific to advection along X
+    use advecY                  ! procedures specific to advection along Y
+    use advecZ                  ! procedures specific to advection along Z
+    use cart_topology           ! Description of mesh and of mpi topology
+    use mpi
+
+    ! Input/Output
+    integer, intent(in)                         :: direction
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: j, k
+    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! dimensionless particle positions
+    logical,dimension(:,:,:),intent(in)         :: bl_type      ! is the particle block a center block or a left one?
+    logical,dimension(:,:,:),intent(in)         :: bl_tag       ! flags the tagged particles
+    real(WP),dimension(:,:,:),intent(inout)     :: scal
+    integer, dimension(:,:), intent(in)         :: send_min     ! minimal index of the mesh points involved in remeshing particles
+    integer, dimension(:,:), intent(in)         :: send_max     ! maximal index of the mesh points involved in remeshing particles
+    integer, dimension(2), intent(in)           :: send_gap_abs ! min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+    integer, dimension(2, 2), intent(in)        :: rece_gap     ! range of the processes which will send me information during the remeshing
+    integer, dimension(:,:), intent(inout)      :: cartography  ! cartography(:,proc_gap) contains the set of line indices in the group for which the
+                                                            ! current process will send data to proc_gap during the remeshing and, for each of
+                                                            ! these lines, the range of mesh points concerned
+    ! Other local variables
+    integer, dimension(:,:), allocatable    :: rece_carto   ! same as above, but for what I receive
+    integer                                 :: min_size     ! minimal size of cartography(:,proc_gap)
+    integer                                 :: max_size     ! maximal size of cartography(:,proc_gap)
+    ! Variables used to remesh particles in a buffer
+    real(WP),dimension(:),allocatable,target:: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain;
+                                                            ! sorted by receiver and not by coordinate
+    integer, dimension(:), allocatable      :: pos_in_buffer! position of each sub-buffer inside send_buffer
+    ! Variables used to manage mpi communications
+    integer, dimension(:), allocatable      :: s_request_ran! mpi communication request (handle) of nonblocking send
+    integer, dimension(:), allocatable      :: r_request_ran! mpi communication request (handle) of nonblocking receive
+    integer, dimension(:,:), allocatable    :: r_status     ! mpi communication status of nonblocking receive
+    integer, dimension(:,:), allocatable    :: s_status     ! mpi communication status of nonblocking send
+    integer                                 :: ierr         ! mpi error code
+    integer                                 :: nb_r, nb_s   ! number of receives/sends
+
+
+    ! ===== Allocation =====
+    ! -- Allocate requests for the (non-blocking) cartography receives --
+    nb_r = rece_gap(1,2) - rece_gap(1,1) + 1
+    allocate(r_request_ran(1:nb_r))
+    r_request_ran = MPI_REQUEST_NULL
+    ! -- Allocate the cartography of what I receive --
+    max_size = 2 + gs(2)*(2+3*gs(1))
+    allocate(rece_carto(max_size,rece_gap(1,1):rece_gap(1,2)))
+    ! -- Allocate requests for the (non-blocking) cartography sends --
+    nb_s = send_gap_abs(2) - send_gap_abs(1) + 1
+    allocate(s_request_ran(1:nb_s))
+    ! -- To manage the buffer --
+    ! Positions of the sub-buffers associated to the different mpi processes
+    allocate(pos_in_buffer(0:nb_s))
+
+    ! ===== Init the remeshing process: pre-process =====
+    ! Build a cartography of the mesh points where particles will be remeshed,
+    ! create a 1D buffer where the remeshing will be performed and the tools
+    ! to manage it.
+    call AC_remesh_init(direction, ind_group, gs, send_min, send_max, &
+        & send_gap_abs, rece_gap, nb_s, cartography, rece_carto,      &
+        & pos_in_buffer, min_size, max_size, s_request_ran, r_request_ran)
+
+
+    ! ===== Initialize the general buffer =====
+    allocate(send_buffer(pos_in_buffer(nb_s) &
+                & + cartography(1,nb_s)-1))
+    send_buffer = 0.0
+
+    ! ===== Remeshing into the buffer by using pointer array =====
+    call remesh_in_buffer_lambda_pt(gs, j, k, send_gap_abs(1)-1, p_pos_adim, bl_type, bl_tag, send_min, &
+            & send_max, scal, send_buffer, pos_in_buffer)
+    ! Note that now:
+    ! => pos_in_buffer(i-1) = first (1D) index of the sub-array of send_buffer
+    ! associated to the i-th mpi process to which I will send remeshed particles.
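+    ! For instance (illustrative values): with nb_s = 3 target processes and
+    ! sub-buffers of 4, 2 and 5 remeshed values, pos_in_buffer(0:2) = (/ 1, 5, 7 /),
+    ! so the data for the 2nd process starts at send_buffer(5).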
+
+    ! ===== Wait for reception of all cartography =====
+    allocate(r_status(MPI_STATUS_SIZE,1:nb_r))
+    call mpi_waitall(nb_r,r_request_ran, r_status, ierr)
+    deallocate(r_request_ran)
+    deallocate(r_status)
+    !allocate(s_status(MPI_STATUS_SIZE,1:nb_s))
+    !allocate(ind_array(send_gap_abs(1):send_gap_abs(2)))
+    !call mpi_testsome(size(s_request_ran),s_request_ran, ind_1Dtable, ind_array, s_status, ierr)
+    !deallocate(ind_array)
+
+    ! ===== Finish the remeshing process =====
+    ! Send buffer, receive some other buffers and update scalar field.
+    call AC_remesh_finalize(direction, ind_group, gs, j, k, scal, send_gap_abs, rece_gap, &
+      & nb_r, nb_s, cartography, rece_carto, send_buffer, pos_in_buffer, min_size)
+
+
+    ! ===== Free memory and communication buffers =====
+    ! -- Deallocate all fields --
+    deallocate(rece_carto)
+    ! -- Check that the Isends are done --
+    allocate(s_status(MPI_STATUS_SIZE,1:nb_s))
+    call mpi_waitall(nb_s, s_request_ran, s_status, ierr)
+    deallocate(s_status)
+    ! -- Free all communication buffer and data --
+    deallocate(send_buffer)
+    deallocate(pos_in_buffer)
+    deallocate(s_request_ran)
+
+end subroutine AC_remesh_via_buffer_lambda
+
+
+!> Use the input information to update the scalar field: create particle
+!! weights (from the scalar values), set the scalar field to 0 and redistribute
+!! the particles - variant for the corrected and limited lambda remeshing formula.
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        gs          = size of groups (along X direction)
+!!    @param[in]        j,k         = indices of the current line (x-coordinate and z-coordinate)
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        bl_type     = table of block types (center or left)
+!!    @param[in]        bl_tag      = information about tagged particles (bl_tag(ind_bl)=1 if the end of the bl_ind-th block
+!!                                    and the beginning of the following one are tagged)
+!!    @param[in]        limit       = limitator function
+!!    @param[in,out]    scal        = scalar field to advect
+!!    @param[in]        send_min    = minimal index of the mesh points involved in remeshing particles
+!!    @param[in]        send_max    = maximal index of the mesh points involved in remeshing particles
+!!    @param[in]        send_gap_abs= send_gap_abs(i) is the min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+!!    @param[in]        rece_gap    = coordinate range of processes which will send me information during the remeshing.
+!!    @param[in,out]    cartography = cartography(:,proc_gap) contains the set of line indices in the group for which the
+!!                                    current process sends data to proc_gap and, for each of these lines, the range
+!!                                    of mesh points concerned.
+!! @details
+!!    This procedure manages all the needed communications. To minimize them,
+!! particles are remeshed inside a local buffer which is then sent to the
+!! process owning the right sub-domain, depending on the particle positions.
+!! No communication is needed to remesh inside the buffer. To avoid extra
+!! copies when creating the particle weights (which are weight = scalar), the
+!! scalar is redistributed directly inside the local buffer.
+!! This proceeds in two parts:
+!!    a - Remesh the particles: redistribute the scalar field inside a local
+!!        buffer and set scalar = 0.
+!!    b - Send the local buffer to its target process and update the scalar
+!!        field, ie scalar = scalar + received buffer.
+!! "remesh_in_buffer_pt" does part "a" and "remesh_buffer_to_scalar" part "b",
+!! except for the communications. The current subroutine manages all the
+!! communications (and the other bookkeeping needed for correctness).
+subroutine AC_remesh_via_buffer_limit_lambda(direction, ind_group, gs, p_pos_adim,  &
+        & j, k, scal, send_min, send_max, send_gap_abs, rece_gap, cartography,      &
+        & bl_type, bl_tag, limit)
+
+    use advec_variables         ! contains info about solver parameters and others.
+    use advec_abstract_proc     ! contains some useful procedure pointers
+    use advecX                  ! procedures specific to advection along X
+    use advecY                  ! procedures specific to advection along Y
+    use advecZ                  ! procedures specific to advection along Z
+    use cart_topology           ! Description of mesh and of mpi topology
+    use mpi
+
+    ! Input/Output
+    integer, intent(in)                         :: direction
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: j, k
+    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! dimensionless particle positions
+    logical, dimension(:,:,:), intent(in)       :: bl_type      ! is the particle block a center block or a left one?
+    logical, dimension(:,:,:), intent(in)       :: bl_tag       ! flags the tagged particles
+    real(WP), dimension(:,:,:), intent(in)      :: limit        ! limitator function (divided by 8.)
+    real(WP),dimension(:,:,:),intent(inout)     :: scal
+    integer, dimension(:,:), intent(in)         :: send_min     ! minimal index of the mesh points involved in remeshing particles
+    integer, dimension(:,:), intent(in)         :: send_max     ! maximal index of the mesh points involved in remeshing particles
+    integer, dimension(2), intent(in)           :: send_gap_abs ! min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+    integer, dimension(2, 2), intent(in)        :: rece_gap     ! range of the processes which will send me information during the remeshing
+    integer, dimension(:,:), intent(inout)      :: cartography  ! cartography(:,proc_gap) contains the set of line indices in the group for which the
+                                                            ! current process will send data to proc_gap during the remeshing and, for each of
+                                                            ! these lines, the range of mesh points concerned
+    ! Other local variables
+    integer, dimension(:,:), allocatable    :: rece_carto   ! same as above, but for what I receive
+    integer                                 :: min_size     ! minimal size of cartography(:,proc_gap)
+    integer                                 :: max_size     ! maximal size of cartography(:,proc_gap)
+    ! Variables used to remesh particles in a buffer
+    real(WP),dimension(:),allocatable,target:: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain;
+                                                            ! sorted by receiver and not by coordinate
+    integer, dimension(:), allocatable      :: pos_in_buffer! position of each sub-buffer inside send_buffer
+    ! Variables used to manage mpi communications
+    integer, dimension(:), allocatable      :: s_request_ran! mpi communication request (handle) of nonblocking send
+    integer, dimension(:), allocatable      :: r_request_ran! mpi communication request (handle) of nonblocking receive
+    integer, dimension(:,:), allocatable    :: r_status     ! mpi communication status of nonblocking receive
+    integer, dimension(:,:), allocatable    :: s_status     ! mpi communication status of nonblocking send
+    integer                                 :: ierr         ! mpi error code
+    integer                                 :: nb_r, nb_s   ! number of receives/sends
+
+
+    ! ===== Allocation =====
+    ! -- Allocate requests for the (non-blocking) cartography receives --
+    nb_r = rece_gap(1,2) - rece_gap(1,1) + 1
+    allocate(r_request_ran(1:nb_r))
+    r_request_ran = MPI_REQUEST_NULL
+    ! -- Allocate the cartography of what I receive --
+    max_size = 2 + gs(2)*(2+3*gs(1))
+    allocate(rece_carto(max_size,rece_gap(1,1):rece_gap(1,2)))
+    ! -- Allocate requests for the (non-blocking) cartography sends --
+    nb_s = send_gap_abs(2) - send_gap_abs(1) + 1
+    allocate(s_request_ran(1:nb_s))
+    ! -- To manage the buffer --
+    ! Positions of the sub-buffers associated to the different mpi processes
+    allocate(pos_in_buffer(0:nb_s))
+
+    ! ===== Init the remeshing process: pre-process =====
+    ! Build a cartography of the mesh points where particles will be remeshed,
+    ! create a 1D buffer where the remeshing will be performed and the tools
+    ! to manage it.
+    call AC_remesh_init(direction, ind_group, gs, send_min, send_max, &
+        & send_gap_abs, rece_gap, nb_s, cartography, rece_carto,      &
+        & pos_in_buffer, min_size, max_size, s_request_ran, r_request_ran)
+
+
+    ! ===== Initialize the general buffer =====
+    allocate(send_buffer(pos_in_buffer(nb_s) &
+                & + cartography(1,nb_s)-1))
+    send_buffer = 0.0
+
+    ! ===== Remeshing into the buffer by using pointer array =====
+    call remesh_in_buffer_limit_lambda_pt(gs, j, k, send_gap_abs(1)-1, p_pos_adim, bl_type, bl_tag, limit,  &
+            & send_min, send_max, scal, send_buffer, pos_in_buffer)
+    ! Note that now:
+    ! => pos_in_buffer(i-1) = first (1D) index of the sub-array of send_buffer
+    ! associated to the i-th mpi process to which I will send remeshed particles.
+
+    ! ===== Wait for reception of all cartography =====
+    allocate(r_status(MPI_STATUS_SIZE,1:nb_r))
+    call mpi_waitall(nb_r,r_request_ran, r_status, ierr)
+    deallocate(r_request_ran)
+    deallocate(r_status)
+    !allocate(s_status(MPI_STATUS_SIZE,1:nb_s))
+    !allocate(ind_array(send_gap_abs(1):send_gap_abs(2)))
+    !call mpi_testsome(size(s_request_ran),s_request_ran, ind_1Dtable, ind_array, s_status, ierr)
+    !deallocate(ind_array)
+
+    ! ===== Finish the remeshing process =====
+    ! Send buffer, receive some other buffers and update scalar field.
+    call AC_remesh_finalize(direction, ind_group, gs, j, k, scal, send_gap_abs, rece_gap, &
+      & nb_r, nb_s, cartography, rece_carto, send_buffer, pos_in_buffer, min_size)
+
+
+    ! ===== Free memory and communication buffers =====
+    ! -- Deallocate all fields --
+    deallocate(rece_carto)
+    ! -- Check that the Isends are done --
+    allocate(s_status(MPI_STATUS_SIZE,1:nb_s))
+    call mpi_waitall(nb_s, s_request_ran, s_status, ierr)
+    deallocate(s_status)
+    ! -- Free all communication buffer and data --
+    deallocate(send_buffer)
+    deallocate(pos_in_buffer)
+    deallocate(s_request_ran)
+
+end subroutine AC_remesh_via_buffer_limit_lambda
+
+
+!> Use the input information to update the scalar field: create particle
+!! weights (from the scalar values), set the scalar field to 0 and redistribute
+!! the particles - variant for M' remeshing formulas.
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        gs          = size of groups (along X direction)
+!!    @param[in]        j,k         = indices of the current line (x-coordinate and z-coordinate)
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in,out]    scal        = scalar field to advect
+!!    @param[in]        send_min    = minimal index of the mesh points involved in remeshing particles
+!!    @param[in]        send_max    = maximal index of the mesh points involved in remeshing particles
+!!    @param[in]        send_gap_abs= send_gap_abs(i) is the min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+!!    @param[in]        rece_gap    = coordinate range of processes which will send me information during the remeshing.
+!!    @param[in,out]    cartography = cartography(:,proc_gap) contains the set of line indices in the group for which the
+!!                                    current process sends data to proc_gap and, for each of these lines, the range
+!!                                    of mesh points concerned.
+!! @details
+!!    This procedure manages all the needed communications. To minimize them,
+!! particles are remeshed inside a local buffer which is then sent to the
+!! process owning the right sub-domain, depending on the particle positions.
+!! No communication is needed to remesh inside the buffer. To avoid extra
+!! copies when creating the particle weights (which are weight = scalar), the
+!! scalar is redistributed directly inside the local buffer.
+!! This proceeds in two parts:
+!!    a - Remesh the particles: redistribute the scalar field inside a local
+!!        buffer and set scalar = 0.
+!!    b - Send the local buffer to its target process and update the scalar
+!!        field, ie scalar = scalar + received buffer.
+!! "remesh_in_buffer_pt" does part "a" and "remesh_buffer_to_scalar" part "b",
+!! except for the communications. The current subroutine manages all the
+!! communications (and the other bookkeeping needed for correctness).
+subroutine AC_remesh_via_buffer_Mprime(direction, ind_group, gs, p_pos_adim, &
+        & j, k, scal, send_min, send_max, send_gap_abs, rece_gap, cartography)
+
+    use advec_variables         ! contains info about solver parameters and others.
+    use advec_abstract_proc     ! contains some useful procedure pointers
+    use advecX                  ! procedures specific to advection along X
+    use advecY                  ! procedures specific to advection along Y
+    use advecZ                  ! procedures specific to advection along Z
+    use cart_topology           ! Description of mesh and of mpi topology
+    use mpi
+
+    ! Input/Output
+    integer, intent(in)                         :: direction
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: j, k
+    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! dimensionless particle positions
+    real(WP),dimension(:,:,:),intent(inout)     :: scal
+    integer, dimension(:,:), intent(in)         :: send_min     ! minimal index of the mesh points involved in remeshing particles
+    integer, dimension(:,:), intent(in)         :: send_max     ! maximal index of the mesh points involved in remeshing particles
+    integer, dimension(2), intent(in)           :: send_gap_abs ! min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+    integer, dimension(2, 2), intent(in)        :: rece_gap     ! range of the processes which will send me information during the remeshing
+    integer, dimension(:,:), intent(inout)      :: cartography  ! cartography(:,proc_gap) contains the set of line indices in the group for which the
+                                                            ! current process will send data to proc_gap during the remeshing and, for each of
+                                                            ! these lines, the range of mesh points concerned
+    ! Other local variables
+    integer, dimension(:,:), allocatable    :: rece_carto   ! same as above, but for what I receive
+    integer                                 :: min_size     ! minimal size of cartography(:,proc_gap)
+    integer                                 :: max_size     ! maximal size of cartography(:,proc_gap)
+    ! Variables used to remesh particles in a buffer
+    real(WP),dimension(:),allocatable,target:: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain;
+                                                            ! sorted by receiver and not by coordinate
+    integer, dimension(:), allocatable      :: pos_in_buffer! position of each sub-buffer inside send_buffer
+    ! Variables used to manage mpi communications
+    integer, dimension(:), allocatable      :: s_request_ran! mpi communication request (handle) of nonblocking send
+    integer, dimension(:), allocatable      :: r_request_ran! mpi communication request (handle) of nonblocking receive
+    integer, dimension(:,:), allocatable    :: r_status     ! mpi communication status of nonblocking receive
+    integer, dimension(:,:), allocatable    :: s_status     ! mpi communication status of nonblocking send
+    integer                                 :: ierr         ! mpi error code
+    integer                                 :: nb_r, nb_s   ! number of receives/sends
+
+
+    ! ===== Allocation =====
+    ! -- Allocate requests for the (non-blocking) cartography receives --
+    nb_r = rece_gap(1,2) - rece_gap(1,1) + 1
+    allocate(r_request_ran(1:nb_r))
+    r_request_ran = MPI_REQUEST_NULL
+    ! -- Allocate the cartography of what I receive --
+    max_size = 2 + gs(2)*(2+3*gs(1))
+    allocate(rece_carto(max_size,rece_gap(1,1):rece_gap(1,2)))
+    ! -- Allocate requests for the (non-blocking) cartography sends --
+    nb_s = send_gap_abs(2) - send_gap_abs(1) + 1
+    allocate(s_request_ran(1:nb_s))
+    ! -- To manage the buffer --
+    ! Positions of the sub-buffers associated to the different mpi processes
+    allocate(pos_in_buffer(0:nb_s))
+
+    ! ===== Init the remeshing process: pre-process =====
+    ! Build a cartography of the mesh points where particles will be remeshed,
+    ! create a 1D buffer where the remeshing will be performed and the tools
+    ! to manage it.
+    call AC_remesh_init(direction, ind_group, gs, send_min, send_max, &
+        & send_gap_abs, rece_gap, nb_s, cartography, rece_carto,      &
+        & pos_in_buffer, min_size, max_size, s_request_ran, r_request_ran)
+
+
+    ! ===== Initialize the general buffer =====
+    allocate(send_buffer(pos_in_buffer(nb_s) &
+                & + cartography(1,nb_s)-1))
+    send_buffer = 0.0
+
+    ! ===== Remeshing into the buffer by using pointer array =====
+    call remesh_in_buffer_Mprime_pt(gs, j, k, send_gap_abs(1)-1, p_pos_adim, send_min, &
+            & send_max, scal, send_buffer, pos_in_buffer)
+    ! Note that now:
+    ! => pos_in_buffer(i-1) = first (1D) index of the sub-array of send_buffer
+    ! associated to the i-th mpi process to which I will send remeshed particles.
+
+    ! ===== Wait for reception of all cartography =====
+    allocate(r_status(MPI_STATUS_SIZE,1:nb_r))
+    call mpi_waitall(nb_r,r_request_ran, r_status, ierr)
+    deallocate(r_request_ran)
+    deallocate(r_status)
+    !allocate(s_status(MPI_STATUS_SIZE,1:nb_s))
+    !allocate(ind_array(send_gap_abs(1):send_gap_abs(2)))
+    !call mpi_testsome(size(s_request_ran),s_request_ran, ind_1Dtable, ind_array, s_status, ierr)
+    !deallocate(ind_array)
+
+    ! ===== Finish the remeshing process =====
+    ! Send buffer, receive some other buffers and update scalar field.
+    call AC_remesh_finalize(direction, ind_group, gs, j, k, scal, send_gap_abs, rece_gap, &
+      & nb_r, nb_s, cartography, rece_carto, send_buffer, pos_in_buffer, min_size)
+
+
+    ! ===== Free memory and communication buffers =====
+    ! -- Deallocate all fields --
+    deallocate(rece_carto)
+    ! -- Check that the Isends are done --
+    allocate(s_status(MPI_STATUS_SIZE,1:nb_s))
+    call mpi_waitall(nb_s, s_request_ran, s_status, ierr)
+    deallocate(s_status)
+    ! -- Free all communication buffer and data --
+    deallocate(send_buffer)
+    deallocate(pos_in_buffer)
+    deallocate(s_request_ran)
+
+end subroutine AC_remesh_via_buffer_Mprime
+
+
+! ==================================================================================
+! ====================     Other tools to remesh particles      ====================
+! ==================================================================================
+
+!> Determine where the particles of each line will be remeshed
+!!    @param[in]    bl_type         = .false. (resp .true.) if the block is left (resp centered)
+!!    @param[in]    p_pos_adim      = dimensionless particle positions
+!!    @param[in]    direction       = current direction (1 = along X, 2 = along Y, 3 = along Z)
+!!    @param[out]   send_min        = minimal index of the mesh points involved in remeshing particles (of the particles in my local subdomain)
+!!    @param[out]   send_max        = maximal index of the mesh points involved in remeshing particles (of the particles in my local subdomain)
+!!    @param[out]   send_gap        = distance between me and the processes to which I will send information (for each line of the group)
+!!    @param[out]   send_gap_abs    = send_gap_abs(i) is the min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+subroutine AC_remesh_range(bl_type, p_pos_adim, direction, send_min, send_max, send_gap, send_gap_abs)
+
+    use advec_variables         ! contains info about solver parameters and others.
+    use cart_topology     ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    logical, dimension(:,:,:), intent(in)   :: bl_type      ! is the particle block a center block or a left one?
+    real(WP), dimension(:,:,:), intent(in)  :: p_pos_adim   ! dimensionless particle positions
+    integer, intent(in)                     :: direction
+    integer, dimension(:,:), intent(out)    :: send_min     ! minimal index of the mesh points involved in remeshing my particles
+    integer, dimension(:,:), intent(out)    :: send_max     ! maximal index of the mesh points involved in remeshing my particles
+    integer, dimension(:,:,:), intent(out)  :: send_gap     ! distance between me and the processes to which I will send information
+    integer, dimension(2), intent(out)      :: send_gap_abs ! min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+
+    !  -- Compute ranges --
+    where (bl_type(1,:,:))
+        ! First particle is a centered one
+        send_min = nint(p_pos_adim(1,:,:))-remesh_stencil(1)
+    elsewhere
+        ! First particle is a left one
+        send_min = floor(p_pos_adim(1,:,:))-remesh_stencil(1)
+    end where
+    where (bl_type(bl_nb(direction)+1,:,:))
+        ! Last particle is a centered one
+        send_max = nint(p_pos_adim(mesh_sc%N_proc(direction),:,:))+remesh_stencil(2)
+    elsewhere
+        ! Last particle is a left one
+        send_max = floor(p_pos_adim(mesh_sc%N_proc(direction),:,:))+remesh_stencil(2)
+    end where
+
+    ! -- What do I have to communicate? --
+    send_gap(:,:,1) = floor(real(send_min-1)/mesh_sc%N_proc(direction))
+    send_gap(:,:,2) = floor(real(send_max-1)/mesh_sc%N_proc(direction))
+    send_gap_abs(1) = minval(send_gap(:,:,1))
+    send_gap_abs(2) = maxval(send_gap(:,:,2))
+
+end subroutine AC_remesh_range
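+! Numerical illustration (hypothetical values): with mesh_sc%N_proc(direction) = 16,
+! a line remeshing onto mesh points send_min = -2 to send_max = 20 gives
+! send_gap(:,:,1) = floor(-3./16.) = -1 and send_gap(:,:,2) = floor(19./16.) = 1,
+! ie this line sends data to the previous, the current and the next process.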
+
+
+!> Determine where the particles of each line will be remeshed - Variant for
+!! remeshing without type/tag
+!!    @param[in]    p_pos_adim      = dimensionless particle positions
+!!    @param[in]    direction       = current direction (1 = along X, 2 = along Y, 3 = along Z)
+!!    @param[out]   send_min        = minimal index of the mesh points involved in remeshing particles (of the particles in my local subdomain)
+!!    @param[out]   send_max        = maximal index of the mesh points involved in remeshing particles (of the particles in my local subdomain)
+!!    @param[out]   send_gap        = distance between me and the processes to which I will send information (for each line of the group)
+!!    @param[out]   send_gap_abs    = send_gap_abs(i) is the min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+subroutine AC_remesh_range_notype(p_pos_adim, direction, send_min, send_max, send_gap, send_gap_abs)
+
+    use advec_variables         ! contains info about solver parameters and others.
+    use cart_topology     ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    real(WP), dimension(:,:,:), intent(in)  :: p_pos_adim   ! dimensionless particle positions
+    integer, intent(in)                     :: direction
+    integer, dimension(:,:), intent(out)    :: send_min     ! minimal index of the mesh points involved in remeshing my particles
+    integer, dimension(:,:), intent(out)    :: send_max     ! maximal index of the mesh points involved in remeshing my particles
+    integer, dimension(:,:,:), intent(out)  :: send_gap     ! distance between me and the processes to which I will send information
+    integer, dimension(2), intent(out)      :: send_gap_abs ! min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+
+    !  -- Compute ranges --
+    send_min = floor(p_pos_adim(1,:,:))-remesh_stencil(1)
+    send_max = floor(p_pos_adim(mesh_sc%N_proc(direction),:,:))+remesh_stencil(2)
+
+    ! -- What do I have to communicate? --
+    send_gap(:,:,1) = floor(real(send_min-1)/mesh_sc%N_proc(direction))
+    send_gap(:,:,2) = floor(real(send_max-1)/mesh_sc%N_proc(direction))
+    send_gap_abs(1) = minval(send_gap(:,:,1))
+    send_gap_abs(2) = maxval(send_gap(:,:,2))
+
+end subroutine AC_remesh_range_notype
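+! For instance (hypothetical stencil values): with remesh_stencil = (/ 2, 3 /)
+! and a first particle at p_pos_adim = 10.4, send_min = floor(10.4) - 2 = 8.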
+
+
+!> Determine the set of processes which will send me information during the remeshing
+!! and compute for each of these processes the range of wanted data. Use implicit
+!! computation rather than communication (only possible if particles are gathered by
+!! blocks with constraints on the velocity variation - as in the corrected lambda
+!! formula) - works directly on a group of particle lines.
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y, 3 = along Z)
+!!    @param[in]        gs          = size of the group of lines along the current direction
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        remesh_min  = minimal index of the meshes where I will remesh my particles.
+!!    @param[in]        remesh_max  = maximal index of the meshes where I will remesh my particles.
+!!    @param[out]       rece_gap    = coordinate range of processes which will send me information during the remeshing.
+!!    @param[in]        send_gap    = distance between me and the processes to which I will send information (for each line of the group)
+!!    @param[in]        send_gap_abs= send_gap_abs(i) is the min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+!!    @param[in,out]    cartography = cartography(:,proc_gap) contains the set of line indices in the group for which the
+!!                                    current process sends data to proc_gap and, for each of these lines, the range
+!!                                    of mesh points concerned.
+!! @details
+!!    Works on a group of lines of size gs(1) x gs(2).
+!!    Obtain the list of processes associated to the sub-domains where my particles
+!!    will be remeshed, and the list of processes which contain particles that have
+!!    to be remeshed in my sub-domain. This determines which processes need to
+!!    communicate together in order to proceed to the remeshing (as, in a parallel
+!!    context, the real space is subdivided and each process contains a part of it).
+!!        At the same time, it computes, for each process with which I will
+!!    communicate, the range of mesh points involved for each line of particles
+!!    inside the group, and stores it using sparse matrix techniques
+!!    (see the cartography defined in the algorithm documentation).
+!!        This routine does not involve any communication to determine if a
+!!    process is the first or the last one (considering its coordinate along
+!!    the current direction) to send remeshing information to a given process.
+!!    It computes this directly using the constraints on the velocity (as in the
+!!    corrected lambda scheme). When possible, use it rather than AC_obtain_senders_com.
+subroutine AC_remesh_determine_communication(direction, gs, ind_group, remesh_min, remesh_max, &
+    & rece_gap, send_gap, send_gap_abs, cartography)
+! XXX Works only for periodic conditions. For Dirichlet conditions, one may
+! not receive rece_gap(1), or rece_gap(2), or both of them => detect it
+! (track the first and the last particles) and deal with it.
+
+    use cart_topology   ! info about mesh and mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+    use mpi
+
+    ! Input/output
+    integer, intent(in)                                 :: direction
+    integer, dimension(2), intent(in)                   :: gs           ! a group size
+    integer, dimension(2), intent(in)                   :: ind_group
+    integer, dimension(:,:), intent(in)                 :: remesh_min   ! minimal index of the meshes where I will remesh my particles
+    integer, dimension(:,:), intent(in)                 :: remesh_max   ! maximal index of the meshes where I will remesh my particles
+    integer, dimension(2, 2), intent(out)               :: rece_gap
+    integer(kind=4), dimension(gs(1),gs(2),2),intent(in):: send_gap     ! minimal and maximal gaps of the processes which contain the sub-domains
+                                                                        ! where my particles will be remeshed, for each line of the group
+    integer, dimension(2), intent(in)                   :: send_gap_abs ! minimal and maximal gaps of the processes which contain the sub-domains where my particles will be remeshed
+    integer, dimension(2+gs(2)*(2+3*gs(1)), &
+        & send_gap_abs(1):send_gap_abs(2)), intent(out) :: cartography
+
+    ! To manage communications and to localize sub-domains
+    integer(kind=4)                         :: proc_gap         ! gap between a process coordinate (along the current
+                                                                ! direction) in the mpi topology and my coordinate
+    integer, dimension(2)                   :: tag_table        ! mpi message tags (to communicate rece_gap(1) and rece_gap(2))
+    integer, dimension(:,:),allocatable     :: send_request     ! mpi communication requests (handles) of the nonblocking sends
+    integer                                 :: ierr             ! mpi error code
+    integer, dimension(MPI_STATUS_SIZE)     :: statut           ! mpi status
+    ! To determine which process is the first/last to send data to another
+    integer, dimension(:,:), allocatable    :: first, last      ! storage of the processes to which I will be the first (or the last) to send
+                                                                ! remeshed particles
+    integer                                 :: first_condition  ! allowed range of values of proc_min and proc_max for being the first
+    integer                                 :: last_condition   ! allowed range of values of proc_min and proc_max for being the last
+    ! Other local variables
+    integer                                 :: ind1, ind2       ! indices of the current line inside the group
+    integer                                 :: min_size         ! size of the cartography header (the line intervals are stored after this index)
+    integer                                 :: gp_size          ! group size
+    integer,dimension(2)                    :: rece_buffer      ! buffer for the reception of rece_gap information
+    logical                                 :: begin_interval   ! are we at the start of an interval?
+
+    rece_gap(1,1) = 3*mesh_sc%N(direction)
+    rece_gap(1,2) = -3*mesh_sc%N(direction)
+    rece_gap(2,:) = 0
+    gp_size = gs(1)*gs(2)
+
+    allocate(send_request(send_gap_abs(1):send_gap_abs(2),3))
+    send_request(:,3) = 0
+
+    ! ===== Compute if I am first or last and determine the cartography =====
+    min_size = 2 + gs(2)
+    ! Initialize first and last to determine if I am the first or the last process
+    ! (considering the current direction) to send remeshing information to each target process
+    allocate(first(2,send_gap_abs(1):send_gap_abs(2)))
+    first(2,:) = 0  ! number of lines for which I am the first
+    allocate(last(2,send_gap_abs(1):send_gap_abs(2)))
+    last(2,:) = 0   ! number of lines for which I am the last
+    ! Initialize cartography
+    cartography(1,:) = 0            ! number of values to receive
+    cartography(2,:) = min_size     ! number of elements to send when sending the cartography
+    ! And compute cartography, first and last !
+    do proc_gap = send_gap_abs(1), send_gap_abs(2)
+        first(1,proc_gap) = -proc_gap
+        last(1,proc_gap) = -proc_gap
+        first_condition =  1-2*bl_bound_size + proc_gap*mesh_sc%N_proc(direction)+1
+        last_condition  = -1+2*bl_bound_size + (proc_gap+1)*mesh_sc%N_proc(direction)
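+        ! e.g. (hypothetical values) with mesh_sc%N_proc(direction) = 16,
+        ! bl_bound_size = 1 and proc_gap = 1: first_condition = 1-2+16+1 = 16
+        ! and last_condition = -1+2+32 = 33, so a line remeshing below mesh
+        ! index 16 (resp. beyond 33) makes me the first (resp. the last) sender.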
+        do ind2 = 1, gs(2)
+            cartography(2+ind2,proc_gap) = 0    ! 2 x number of intervals of concerned lines in the column ind2
+            begin_interval = .true.
+            do ind1 = 1, gs(1)
+                ! Does proc_gap belong to [send_gap(i1,i2,1);send_gap(i1,i2,2)]?
+                if((proc_gap>=send_gap(ind1,ind2,1)).and.(proc_gap<=send_gap(ind1,ind2,2))) then
+                    ! Compute if I am the first.
+                    if (remesh_min(ind1,ind2)< first_condition) first(2,proc_gap) =  first(2,proc_gap)+1
+                    ! Compute if I am the last.
+                    if (remesh_max(ind1,ind2) > last_condition) last(2,proc_gap) =  last(2,proc_gap)+1
+                    ! Update cartography // Needed even if the target process is myself, as we use a buffer
+                    ! in all cases (the scalar field cannot be used directly during the remeshing)
+                    if (begin_interval) then
+                        cartography(2+ind2,proc_gap) =  cartography(2+ind2,proc_gap)+2
+                        cartography(cartography(2,proc_gap)+1,proc_gap) = ind1
+                        cartography(2,proc_gap) = cartography(2,proc_gap) + 2
+                        cartography(cartography(2,proc_gap),proc_gap) = ind1
+                        begin_interval = .false.
+                    else
+                        cartography(cartography(2,proc_gap),proc_gap) = ind1
+                    end if
+                else
+                    begin_interval = .true.
+                end if
+            end do
+        end do
+    end do
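+    ! Worked example (illustrative): with gs = (/ 4, 2 /) (so min_size = 4) and
+    ! a proc_gap concerned by lines 1-2 and 4 of the first column and by all
+    ! lines of the second one, the loop above yields
+    !     cartography(1:10,proc_gap) = (/ 0, 10, 4, 2, 1, 2, 4, 4, 1, 4 /)
+    ! ie 2 intervals ([1,2] and [4,4]) in column 1 and 1 interval ([1,4]) in
+    ! column 2, stored as (first,last) pairs after the 4-integer header.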
+
+    ! ===== Send information about first and last  =====
+    tag_table = compute_tag(ind_group, tag_obtsend_NP, direction)
+    do proc_gap = send_gap_abs(1), send_gap_abs(2)
+        ! I am the first ?
+        if (first(2,proc_gap)>0) then
+            if(neighbors(direction,proc_gap)/= D_rank(direction)) then
+                call mpi_ISsend(first(1,proc_gap), 2, MPI_INTEGER, neighbors(direction,proc_gap), &
+                        & tag_table(1), D_comm(direction), send_request(proc_gap,1), ierr)
+                send_request(proc_gap,3) = 1
+            else
+                rece_gap(1,1) = min(rece_gap(1,1), -proc_gap)
+                rece_gap(2,1) = rece_gap(2,1) + first(2,proc_gap)
+            end if
+        end if
+        ! I am the last ?
+        if (last(2,proc_gap)>0) then
+            if(neighbors(direction,proc_gap)/= D_rank(direction)) then
+                call mpi_ISsend(last(1,proc_gap), 2, MPI_INTEGER, neighbors(direction,proc_gap), &
+                        & tag_table(2), D_comm(direction), send_request(proc_gap,2), ierr)
+                send_request(proc_gap,3) = send_request(proc_gap, 3) + 2
+            else
+                rece_gap(1,2) = max(rece_gap(1,2), -proc_gap)
+                rece_gap(2,2) = rece_gap(2,2) + last(2,proc_gap)
+            end if
+        end if
+    end do
+
+    ! ===== Receive information from the first and the last processes which will send me remeshing data =====
+    do while(rece_gap(2,1) < gp_size)
+        call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
+        rece_gap(1,1) = min(rece_gap(1,1), rece_buffer(1))
+        rece_gap(2,1) = rece_gap(2,1) + rece_buffer(2)
+    end do
+    do while(rece_gap(2,2) < gp_size)
+        call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
+        rece_gap(1,2) = max(rece_gap(1,2), rece_buffer(1))
+        rece_gap(2,2) = rece_gap(2,2) + rece_buffer(2)
+    end do
+
+    ! ===== Free Isend buffer =====
+    do proc_gap = send_gap_abs(1), send_gap_abs(2)
+        select case (send_request(proc_gap,3))
+            case (3)
+                call mpi_wait(send_request(proc_gap,1), statut, ierr)
+                call mpi_wait(send_request(proc_gap,2), statut, ierr)
+            case (2)
+                call mpi_wait(send_request(proc_gap,2), statut, ierr)
+            case (1)
+                call mpi_wait(send_request(proc_gap,1), statut, ierr)
+        end select
+    end do
+
+    ! ===== Deallocate fields =====
+    deallocate(send_request)
+    deallocate(first)
+    deallocate(last)
+
+end subroutine AC_remesh_determine_communication
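+! The first/last handshake above follows this pattern (schematic sketch):
+!     for each proc_gap in send_gap_abs(1):send_gap_abs(2)
+!         if I am the first (resp. the last) sender for some lines of this target:
+!             if the target is not myself: mpi_ISsend of (gap, number of lines)
+!             else: fold (gap, number of lines) into rece_gap locally
+!     while fewer than gp_size lines are accounted for: mpi_recv and fold into rece_gap
+!     finally, mpi_wait on every pending ISsend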
+
+
+!> Determine the set of processes which will send me information during the remeshing
+!! and compute for each of these processes the range of wanted data. Version for the M'6
+!! scheme (some of the implicit computations cannot be done anymore)
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y, 3 = along Z)
+!!    @param[in]        gs          = size of the group of lines along the current direction
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[out]       rece_gap    = coordinate range of processes which will send me information during the remeshing.
+!!    @param[in]        send_gap    = distance between me and the processes to which I will send information (for each line of the group)
+!!    @param[in]        send_gap_abs= send_gap_abs(i) is the min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+!!    @param[in,out]    cartography = cartography(:,proc_gap) contains the set of line indices in the group for which the
+!!                                    current process sends data to proc_gap and, for each of these lines, the range
+!!                                    of mesh points concerned.
+!! @details
+!!    Works on a group of lines of size gs(1) x gs(2).
+!!    Obtain the list of processes associated to the sub-domains where my particles
+!!    will be remeshed, and the list of processes which contain particles that have
+!!    to be remeshed in my sub-domain. This determines which processes need to
+!!    communicate together in order to proceed to the remeshing (as, in a parallel
+!!    context, the real space is subdivided and each process contains a part of it).
+!!        At the same time, it computes, for each process with which I will
+!!    communicate, the range of mesh points involved for each line of particles
+!!    inside the group, and stores it using sparse matrix techniques
+!!    (see the cartography defined in the algorithm documentation).
+!!        This routine involves communication to determine if a process is the
+!!    first or the last one (considering its coordinate along the current
+!!    direction) to send remeshing information to a given process.
+subroutine AC_remesh_determine_communication_com(direction, gs, ind_group, &
+    & rece_gap, send_gap, send_gap_abs, cartography)
+! XXX Works only for periodic conditions.
+
+    use cart_topology   ! info about mesh and mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+    use mpi
+
+    ! Input/output
+    integer, intent(in)                                 :: direction
+    integer, dimension(2), intent(in)                   :: gs           ! a group size
+    integer, dimension(2), intent(in)                   :: ind_group
+    integer, dimension(2, 2), intent(out)               :: rece_gap     ! minimal and maximal gaps of the processes which will remesh inside me
+    integer(kind=4), dimension(gs(1),gs(2),2),intent(in):: send_gap     ! minimal and maximal gaps of the processes which contain the sub-domains
+                                                                        ! where my particles will be remeshed, for each line of the group
+    integer, dimension(2), intent(in)                   :: send_gap_abs ! minimal and maximal gaps of the processes which contain the sub-domains where my particles will be remeshed
+    integer, dimension(2+gs(2)*(2+3*gs(1)), &
+        & send_gap_abs(1):send_gap_abs(2)), intent(out) :: cartography
+
+    ! To manage communications and to localize sub-domains
+    integer(kind=4)                         :: proc_gap         ! gap between a process coordinate (along the current
+                                                                ! direction) in the mpi topology and my coordinate
+    integer, dimension(2)                   :: tag_table        ! mpi message tags (to communicate rece_gap(1) and rece_gap(2))
+    integer, dimension(:,:),allocatable     :: send_request     ! mpi communication requests (handles) of the nonblocking sends
+    integer                                 :: ierr             ! mpi error code
+    integer, dimension(MPI_STATUS_SIZE)     :: statut           ! mpi status
+    ! To determine which process is the first/last to send data to another
+    integer, dimension(gs(1), gs(2))        :: send_max_prev    ! maximum gap between the previous process and the receivers of its remeshing buffer
+    integer, dimension(gs(1), gs(2))        :: send_min_next    ! minimum gap between the next process and the receivers of its remeshing buffer
+    integer, dimension(:,:), allocatable    :: first, last      ! storage of the processes to which I will be the first (or the last) to send
+                                                                ! remeshed particles
+    ! Other local variables
+    integer                                 :: ind1, ind2       ! indices of the current line inside the group
+    integer                                 :: min_size         ! size of the cartography header (the line intervals are stored after this index)
+    integer                                 :: gp_size          ! group size
+    integer,dimension(2)                    :: rece_buffer      ! buffer for the reception of rece_gap information
+    logical                                 :: begin_interval   ! are we at the start of an interval?
+
+    rece_gap(1,1) = 3*mesh_sc%N(direction)
+    rece_gap(1,2) = -3*mesh_sc%N(direction)
+    rece_gap(2,:) = 0
+    gp_size = gs(1)*gs(2)
+
+    allocate(send_request(send_gap_abs(1):send_gap_abs(2),3))
+    send_request(:,3) = 0
+
+    ! ===== Exchange ghosts =====
+    ! Compute message tags - we re-use the tag_part_tag_NP id, as using this
+    ! procedure supposes not using "AC_type_and_block"
+    tag_table = compute_tag(ind_group, tag_part_tag_NP, direction)
+    ! Exchange "ghost"
+    call mpi_Sendrecv(send_gap(1,1,1), gp_size, MPI_INTEGER, neighbors(direction,-1), tag_table(1), &
+            & send_min_next(1,1), gp_size, MPI_INTEGER, neighbors(direction,1), tag_table(1),    &
+            & D_comm(direction), statut, ierr)
+    call mpi_Sendrecv(send_gap(1,1,2), gp_size, MPI_INTEGER, neighbors(direction,1), tag_table(2), &
+            & send_max_prev(1,1), gp_size, MPI_INTEGER, neighbors(direction,-1), tag_table(2),    &
+            & D_comm(direction), statut, ierr)
+    ! Translate the gaps so that they are relative to my position
+    send_max_prev = send_max_prev - 1
+    send_min_next = send_min_next + 1
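+    ! e.g. if the previous process sends up to gap +2 relative to itself, this
+    ! is gap +1 relative to me, hence the "-1" translation (and symmetrically
+    ! "+1" for the minimal gaps received from the next process).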
+
+    ! ===== Compute if I am first or last and determine the cartography =====
+    min_size = 2 + gs(2)
+    ! Initialize first and last to determine if I am the first or the last process
+    ! (considering the current direction) to send remeshing information to each target process
+    allocate(first(2,send_gap_abs(1):send_gap_abs(2)))
+    first(2,:) = 0  ! number of lines for which I am the first
+    allocate(last(2,send_gap_abs(1):send_gap_abs(2)))
+    last(2,:) = 0   ! number of lines for which I am the last
+    ! Initialize cartography
+    cartography(1,:) = 0            ! number of values to receive
+    cartography(2,:) = min_size     ! number of elements to send when sending the cartography
+    ! And compute cartography, first and last !
+    do proc_gap = send_gap_abs(1), send_gap_abs(2)
+        first(1,proc_gap) = -proc_gap
+        last(1,proc_gap) = -proc_gap
+        do ind2 = 1, gs(2)
+            cartography(2+ind2,proc_gap) = 0    ! 2 x number of intervals of concerned lines in the column ind2
+            begin_interval = .true.
+            do ind1 = 1, gs(1)
+                ! Does proc_gap belong to [send_gap(i1,i2,1);send_gap(i1,i2,2)]?
+                if((proc_gap>=send_gap(ind1,ind2,1)).and.(proc_gap<=send_gap(ind1,ind2,2))) then
+                    ! Compute if I am the first.
+                    if(proc_gap > send_max_prev(ind1,ind2)) first(2,proc_gap) =  first(2,proc_gap)+1
+                    ! Compute if I am the last.
+                    if(proc_gap < send_min_next(ind1,ind2)) last(2,proc_gap) =  last(2,proc_gap)+1
+                    ! Update cartography // Needed even if the target process is myself, as we use a buffer
+                    ! in all cases (the scalar field cannot be used directly during the remeshing)
+                    if (begin_interval) then
+                        cartography(2+ind2,proc_gap) =  cartography(2+ind2,proc_gap)+2
+                        cartography(cartography(2,proc_gap)+1,proc_gap) = ind1
+                        cartography(2,proc_gap) = cartography(2,proc_gap) + 2
+                        cartography(cartography(2,proc_gap),proc_gap) = ind1
+                        begin_interval = .false.
+                    else
+                        cartography(cartography(2,proc_gap),proc_gap) = ind1
+                    end if
+                else
+                    begin_interval = .true.
+                end if
+            end do
+        end do
+    end do
+
+    ! ===== Send information about first and last  =====
+    tag_table = compute_tag(ind_group, tag_obtsend_NP, direction)
+    do proc_gap = send_gap_abs(1), send_gap_abs(2)
+        ! I am the first ?
+        if (first(2,proc_gap)>0) then
+            if(neighbors(direction,proc_gap)/= D_rank(direction)) then
+                call mpi_ISsend(first(1,proc_gap), 2, MPI_INTEGER, neighbors(direction,proc_gap), &
+                        & tag_table(1), D_comm(direction), send_request(proc_gap,1), ierr)
+                send_request(proc_gap,3) = 1
+            else
+                rece_gap(1,1) = min(rece_gap(1,1), -proc_gap)
+                rece_gap(2,1) = rece_gap(2,1) + first(2,proc_gap)
+            end if
+        end if
+        ! I am the last ?
+        if (last(2,proc_gap)>0) then
+            if(neighbors(direction,proc_gap)/= D_rank(direction)) then
+                call mpi_ISsend(last(1,proc_gap), 2, MPI_INTEGER, neighbors(direction,proc_gap), &
+                        & tag_table(2), D_comm(direction), send_request(proc_gap,2), ierr)
+                send_request(proc_gap,3) = send_request(proc_gap, 3) + 2
+            else
+                rece_gap(1,2) = max(rece_gap(1,2), -proc_gap)
+                rece_gap(2,2) = rece_gap(2,2) + last(2,proc_gap)
+            end if
+        end if
+    end do
+
+    ! ===== Receive information from the first and the last processes which will send me remeshing data =====
+    do while(rece_gap(2,1) < gp_size)
+        call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
+        rece_gap(1,1) = min(rece_gap(1,1), rece_buffer(1))
+        rece_gap(2,1) = rece_gap(2,1) + rece_buffer(2)
+    end do
+    do while(rece_gap(2,2) < gp_size)
+        call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
+        rece_gap(1,2) = max(rece_gap(1,2), rece_buffer(1))
+        rece_gap(2,2) = rece_gap(2,2) + rece_buffer(2)
+    end do
+
+    ! ===== Free Isend buffer =====
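+    ! send_request(:,3) is used as a small flag: 1 if only the 'first' message
+    ! was sent, 2 if only the 'last' one, 3 if both were sent.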
+    do proc_gap = send_gap_abs(1), send_gap_abs(2)
+        select case (send_request(proc_gap,3))
+            case (3)
+                call mpi_wait(send_request(proc_gap,1), statut, ierr)
+                call mpi_wait(send_request(proc_gap,2), statut, ierr)
+            case (2)
+                call mpi_wait(send_request(proc_gap,2), statut, ierr)
+            case (1)
+                call mpi_wait(send_request(proc_gap,1), statut, ierr)
+        end select
+    end do
+
+    ! ===== Deallocate fields =====
+    deallocate(send_request)
+    deallocate(first)
+    deallocate(last)
+
+end subroutine AC_remesh_determine_communication_com
+
+
+!> Update the cartography of data which will be exchanged from one process to another in order to remesh particles.
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y, 3 = along Z)
+!!    @param[in]        gs          = size of the group of lines along the current direction
+!!    @param[in]        begin_i1    = index of the first place in the cartography array where the indices
+!!                                      along the first direction of the group of lines are stored
+!!    @param[in]        proc_gap    = distance between my (mpi) coordinate and the coordinate of the target process
+!!    @param[in]        ind_carto   = current column inside the cartography (different from proc_gap: in this procedure
+!!                                    the first index is 1, as the cartography range is not given as an argument)
+!!    @param[in]        send_min    = minimal mesh index involved in remeshing particles (of the particles in my local subdomain)
+!!    @param[in]        send_max    = maximal mesh index involved in remeshing particles (of the particles in my local subdomain)
+!!    @param[in,out]    cartography = cartography(:,ind_carto) contains the set of line indices in the block for which the
+!!                                    current process sends data to proc_gap, and for each of these lines the range
+!!                                    of mesh points where the remeshed scalar values are written.
+!!    @param[out]       com_size    = number of elements (integers) stored in the cartography (which will be the size of some mpi communications)
+subroutine AC_remesh_cartography(direction, gs, begin_i1, proc_gap, ind_carto, send_min, send_max, cartography, com_size)
+
+    use cart_topology           ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    integer, intent(in)                     :: direction
+    integer, dimension(2), intent(in)       :: gs
+    integer, intent(in)                     :: begin_i1     ! index of the first place in the cartography
+                                                            ! array where the indices along the first direction
+                                                            ! of the group of lines are stored
+    integer, intent(in)                     :: proc_gap     ! distance between my (mpi) coordinate and the coordinate of the target process
+    integer, intent(in)                     :: ind_carto    ! current column inside the cartography (different from proc_gap: in this
+                                                            ! procedure the first index is 1, as the cartography range is not given as an argument)
+    integer, dimension(:,:), intent(in)     :: send_min     ! minimal mesh index involved in remeshing my local particles
+    integer, dimension(:,:), intent(in)     :: send_max     ! maximal mesh index involved in remeshing my local particles
+    integer, dimension(:,:), intent(inout)  :: cartography
+    integer, intent(out)                    :: com_size     ! number of elements (integers) stored into the cartography (which will
+                                                            ! be the size of some mpi communication)
+
+    ! Other local variables
+    integer                                 :: gap          ! gap between my local indices and the local indices of another process
+    integer                                 :: i1, i2       ! indices of a line within the group
+    integer                                 :: ind_for_i1   ! where to read the first coordinate (i1) of the current line inside the cartography
+    integer                                 :: ind_1Dtable  ! index of my current position inside a one-dimensional table
+
+    cartography(1,ind_carto) = 0
+    ! Use the cartography to know which lines are concerned
+    com_size = cartography(2,ind_carto)
+    ! Index shift between my local mesh indices and those of the target process
+    gap = proc_gap*mesh_sc%N_proc(direction)
+    ! Position in cartography(:,ind_carto) of the current i1 indice
+    ind_for_i1 = begin_i1
+    do i2 = 1, gs(2)
+        do ind_1Dtable = ind_for_i1+1, ind_for_i1 + cartography(2+i2,ind_carto), 2
+            do i1 = cartography(ind_1Dtable,ind_carto), cartography(ind_1Dtable+1,ind_carto)
+                ! The interval starts at:
+                cartography(com_size+1,ind_carto) = max(send_min(i1,i2), gap+1) ! Fortran => indices start from 1
+                ! and ends at:
+                cartography(com_size+2,ind_carto) = min(send_max(i1,i2), gap+mesh_sc%N_proc(direction))
+                ! update the number of elements to send
+                cartography(1,ind_carto) = cartography(1,ind_carto) &
+                            & + cartography(com_size+2,ind_carto) &
+                            & - cartography(com_size+1,ind_carto) + 1
+                com_size = com_size+2
+            end do
+        end do
+        ind_for_i1 = ind_for_i1 + cartography(2+i2,ind_carto)
+    end do
+
+end subroutine AC_remesh_cartography
+
+
+!> Perform all the pre-processing needed to remesh particles and to perform the associated communications.
+!! @details
+!!     As the geometric domain is subdivided among the different mpi processes, the
+!! particle remeshing involves mpi communications in order to redistribute
+!! particle weights to the right place.
+!!     In order to gather these communications for different lines of particles,
+!! the particle remeshing is performed into a buffer. The buffer is a 1D array
+!! whose structure ensures that all the values that have to be sent to a given
+!! process are contiguous in memory.
+!!     This subroutine creates this buffer and provides a map to manage it. This
+!! map associates an XYZ coordinate (in the geometrical domain) to each
+!! element of this 1D array.
+subroutine AC_remesh_init(direction, ind_group, gs, send_min, send_max, &
+    & send_gap_abs, rece_gap, nb_s, cartography, rece_carto,            &
+    & pos_in_buffer, min_size, max_size, s_request_ran, r_request_ran)
+
+    use mpi
+    use cart_topology     ! Description of mesh and of mpi topology
+    use advec_variables         ! contains info about solver parameters and others.
+
+    ! Input/Output
+    integer, intent(in)                     :: direction
+    integer, dimension(2), intent(in)       :: ind_group
+    integer, dimension(2), intent(in)       :: gs
+    integer, dimension(:,:), intent(in)     :: send_min     ! minimal mesh index involved in remeshing particles (for each line of particles)
+    integer, dimension(:,:), intent(in)     :: send_max     ! maximal mesh index involved in remeshing particles
+    integer, dimension(2), intent(in)       :: send_gap_abs ! min (resp. max) value of send_gap(:,:,i) with i=1 (resp. 2)
+    integer, dimension(2, 2), intent(in)    :: rece_gap     ! distance between me and the processes from which I receive information
+    integer, intent(in)                     :: nb_s         ! number of sends
+    integer, dimension(:,:), intent(inout)  :: cartography  ! cartography(:,i) contains the set of line indices in the block to which the
+                                                            ! current process will send data during remeshing, and for each of these lines
+                                                            ! the range of mesh points where the remeshed scalar values are written
+    integer, dimension(:,:), intent(inout)  :: rece_carto   ! same as above, but for what I receive
+    integer,dimension(0:nb_s),intent(inout) :: pos_in_buffer! information about the organization of the 1D buffer used to remesh
+                                                            ! a 3D set of particles
+    integer, intent(out)                    :: min_size     ! size of the fixed-size header of a cartography column (= 2 + gs(2))
+    integer, intent(in)                     :: max_size     ! maximal size of a cartography column
+    integer, dimension(:), intent(inout)    :: s_request_ran! mpi communication request (handle) of nonblocking send
+    integer, dimension(:), intent(inout)    :: r_request_ran! mpi communication request (handle) of nonblocking receive
+
+    ! Others
+    integer                                 :: proc_gap     ! distance between my (mpi) coordinate and the coordinate of the
+                                                            ! process associated to a given position
+    integer                                 :: ind_gap      ! loop index
+    integer                                 :: ind_1Dtable  ! index of my current position inside a one-dimensional table
+    ! Variable use to manage mpi communications
+    integer                                 :: com_size     ! size of message send/receive
+    integer                                 :: tag          ! mpi message tag
+    integer                                 :: ierr         ! mpi error code
+
+    ! ===== Receive cartography =====
+    ! It is better to post receives before sends.
+    ind_1Dtable = 0
+    do proc_gap = rece_gap(1,1), rece_gap(1,2)
+        ind_1Dtable = ind_1Dtable + 1
+        if (neighbors(direction,proc_gap)/= D_rank(direction)) then
+            tag = compute_tag(ind_group, tag_bufToScal_range, direction, -proc_gap)
+            call mpi_Irecv(rece_carto(1,ind_1Dtable), max_size, MPI_INTEGER,  &
+                & neighbors(direction,proc_gap), tag, D_COMM(direction),      &
+                & r_request_ran(ind_1Dtable), ierr)
+        else
+            rece_carto(1,ind_1Dtable) = 0
+        end if
+    end do
+
+    ! ===== Complete cartography and send range about the particles I remesh =====
+    s_request_ran = MPI_REQUEST_NULL
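+    ! min_size = 2 header entries + gs(2) interval counters: the fixed-size part
+    ! of each cartography column, before the per-line interval bounds.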
+    min_size = 2 + gs(2)
+    proc_gap = send_gap_abs(1) - 1
+    do ind_gap = 1, nb_s ! nb_s = send_gap_abs(2) - send_gap_abs(1) + 1
+        proc_gap = proc_gap + 1
+        !proc_gap = ind_gap+send_gap_abs(1)-1
+        call AC_remesh_cartography(direction, gs, min_size, proc_gap, ind_gap, &
+            & send_min, send_max, cartography, com_size)
+#ifdef PART_DEBUG
+            if(com_size>max_size) then
+                print*, 'cartography size = ', com_size ,' is larger than the theoretical size ', &
+                    & max_size,' and cartography = ', cartography(:,ind_gap)
+            end if
+#endif
+        ! Tag = concatenation of (rank+1), ind_group(1), ind_group(2), direction and unique Id.
+        tag = compute_tag(ind_group, tag_bufToScal_range, direction, proc_gap)
+        ! Send message
+        if (neighbors(direction,proc_gap) /= D_rank(direction)) then
+            call mpi_ISsend(cartography(1,ind_gap), com_size, MPI_INTEGER,&
+                & neighbors(direction,proc_gap), tag, D_comm(direction),  &
+                & s_request_ran(ind_gap),ierr)
+        end if
+    end do
+
+    ! ===== Initialize the general buffer =====
+    ! The same buffer is used to send data to all target processes. Its size
+    ! has to be computed, and it has to be split into one part per target
+    ! process.
+    ! => pos_in_buffer(i) = first (1D) index of the sub-array of send_buffer
+    ! associated to the i-th mpi process to which I will send remeshed particles.
+    pos_in_buffer(0) = 1
+    pos_in_buffer(1) = 1
+    do ind_gap = 1, nb_s - 1 ! nb_s - 1 = send_gap_abs(2) - send_gap_abs(1)
+        pos_in_buffer(ind_gap+1)= pos_in_buffer(ind_gap) + cartography(1,ind_gap)
+    end do
+    ! While writing values into the send buffer during the remeshing, pos_in_buffer is updated.
+    ! As it has one extra element (the "0" one), after this process pos_in_buffer(i-1)
+    ! is equal to the first (1D) index of the sub-array of send_buffer
+    ! associated to the i-th mpi process to which I will send remeshed particles.
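+    !
+    ! As a hypothetical illustration: with nb_s = 3 targets and per-target
+    ! counts cartography(1,1:3) = (/5, 3, 7/), the prefix sum above gives
+    ! pos_in_buffer(0:3) = (/1, 1, 6, 9/): target 1 owns send_buffer(1:5),
+    ! target 2 owns send_buffer(6:8) and target 3 owns send_buffer(9:15).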
+
+end subroutine AC_remesh_init
+
+!> Perform all the work needed to compute the scalar values at t+dt from the buffer
+!! containing the remeshing of the local particles.
+!! @details
+!!     After having remeshed the particles of the local sub-domain into a
+!! buffer, it remains to send the buffer parts to the different processes according
+!! to the subdivision of the domain among the processes. Then, the local scalar field
+!! is updated from the received buffers.
+subroutine AC_remesh_finalize(direction, ind_group, gs, j, k, scal, send_gap_abs, rece_gap, &
+    & nb_r, nb_s, cartography, rece_carto, send_buffer, pos_in_buffer, min_size)
+
+    use mpi
+    use cart_topology     ! Description of mesh and of mpi topology
+    use advec_variables         ! contains info about solver parameters and others.
+
+    ! Input/Output
+    integer, intent(in)                         :: direction
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: j, k
+    real(WP),dimension(:,:,:),intent(inout)     :: scal
+    integer, dimension(2), intent(in)           :: send_gap_abs ! min (resp. max) value of send_gap(:,:,i) with i=1 (resp. 2)
+    integer, dimension(2, 2), intent(in)        :: rece_gap     ! distance between me and the processes from which I receive information
+    integer, intent(in)                         :: nb_r, nb_s   ! number of receptions / of sends
+    integer, dimension(:,:), intent(in)         :: cartography  ! cartography(:,i) contains the set of line indices in the block to which the
+                                                                ! current process will send data during remeshing, and for each of these lines
+                                                                ! the range of mesh points where the remeshed scalar values are written
+    integer, dimension(:,:), intent(in)         :: rece_carto   ! same as above, but for what I receive
+    real(WP),dimension(:), intent(in)           :: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain,
+                                                                ! sorted by receiver and not by coordinate
+    integer, dimension(0:nb_s), intent(inout)   :: pos_in_buffer! first index of each receiver's part in send_buffer
+    integer, intent(in)                         :: min_size     ! size of the fixed-size header of a cartography column (= 2 + gs(2))
+
+    ! Other local variables
+    integer                                 :: proc_gap, gap! distance between my (mpi) coordinate and the coordinate of the
+                                                            ! process associated to a given position
+    integer                                 :: ind_gap
+    integer                                 :: ind_1Dtable  ! index of my current position inside a one-dimensional table
+    ! Variables used to update the scalar field from the buffers
+    real(WP),dimension(:),allocatable,target:: rece_buffer  ! buffer used to receive the scalar field from other processes
+    integer, dimension(:), allocatable      :: rece_pos     ! cells from rece_pos(i) to rece_pos(i+1)-1 in rece_buffer
+                                                            ! are devoted to the process of relative position i
+    ! Variable use to manage mpi communications
+    integer, dimension(:), allocatable      :: s_request_sca! mpi communication request (handle) of nonblocking send
+    integer, dimension(:), allocatable      :: r_request_sca! mpi communication request (handle) of nonblocking receive
+#ifndef BLOCKING_SEND
+    integer, dimension(:,:), allocatable    :: s_status     ! mpi communication status of nonblocking send
+#endif
+    integer, dimension(mpi_status_size)     :: r_status   ! another mpi communication status
+    integer                                 :: tag          ! mpi message tag
+    integer                                 :: ierr         ! mpi error code
+    integer                                 :: missing_msg  ! number of remeshing buffer not yet received
+
+
+    ! ===== Receive buffer (init receive before send) =====
+    ! -- Compute the size of the reception buffer and split it into parts corresponding to each sender --
+    allocate(rece_pos(rece_gap(1,1):rece_gap(1,2)+1))
+    rece_pos(rece_gap(1,1)) = 1
+    ind_gap = 0
+    do proc_gap = rece_gap(1,1), rece_gap(1,2)
+        ind_gap = ind_gap + 1
+        rece_pos(proc_gap+1)= rece_pos(proc_gap) + rece_carto(1,ind_gap)
+    end do
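+    ! rece_pos is the receive-side analogue of pos_in_buffer: values coming from
+    ! the process at relative position proc_gap land in
+    ! rece_buffer(rece_pos(proc_gap) : rece_pos(proc_gap+1)-1).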
+    allocate(rece_buffer(rece_pos(rece_gap(1,2)+1)-1))
+    ! -- And initialize the reception --
+    allocate(r_request_sca(1:nb_r))
+    r_request_sca = MPI_REQUEST_NULL
+    ind_gap = 0
+    do proc_gap = rece_gap(1,1), rece_gap(1,2)
+        ind_gap = ind_gap + 1 ! = proc_gap - rece_gap(1,1)+1
+        if (neighbors(direction,proc_gap)/= D_rank(direction)) then
+            tag = compute_tag(ind_group, tag_bufToScal_buffer, direction, -proc_gap)
+            call mpi_Irecv(rece_buffer(rece_pos(proc_gap)), rece_carto(1,ind_gap),  &
+                & MPI_DOUBLE_PRECISION, neighbors(direction,proc_gap), tag,         &
+                & D_COMM(direction), r_request_sca(ind_gap), ierr)
+        end if
+    end do
+
+    ! ===== Send buffer =====
+    missing_msg = nb_r
+    allocate(s_request_sca(1:nb_s))
+    s_request_sca = MPI_REQUEST_NULL
+    proc_gap = send_gap_abs(1)-1
+    ! -- Send the buffer to the matching processes and update the scalar field --
+    do ind_gap = 1, nb_s
+        proc_gap = proc_gap +1
+        !proc_gap = ind_gap-1+send_gap_abs(1)
+        if (neighbors(direction,proc_gap)/=D_rank(direction)) then
+            ! Send buffer
+            tag = compute_tag(ind_group, tag_bufToScal_buffer, direction, ind_gap-1+send_gap_abs(1))
+#ifdef BLOCKING_SEND
+            call mpi_Send(send_buffer(pos_in_buffer(ind_gap-1)), cartography(1,ind_gap), MPI_DOUBLE_PRECISION, &
+                & neighbors(direction,proc_gap), tag, D_comm(direction), r_status, ierr)
+#else
+            call mpi_ISsend(send_buffer(pos_in_buffer(ind_gap-1)), cartography(1,ind_gap), MPI_DOUBLE_PRECISION, &
+                & neighbors(direction,proc_gap), tag, D_comm(direction), s_request_sca(ind_gap),ierr)
+#endif
+        else
+            ! Index shift between the buffer and the local scalar field
+            !gap = -(ind_gap-1+send_gap_abs(1))*mesh_sc%N_proc(direction)
+            gap = -proc_gap*mesh_sc%N_proc(direction)
+            ! Update directly the scalar field
+            call remesh_buffer_to_scalar_pt(gs, j, k, ind_gap, gap, min_size, &
+                    & cartography, send_buffer, scal, pos_in_buffer(ind_gap-1))
+            missing_msg = missing_msg - 1
+        end if
+    end do
+
+    ! ===== Update scalar field =====
+    do while (missing_msg >= 1)
+        ! --- Pick a completed reception ---
+        ! mpi_waitany returns the index of one reception that has completed
+        ! (and has not yet been freed).
+        call mpi_waitany(nb_r, r_request_sca, ind_1Dtable, r_status, ierr)
+        ! -- Update the scalar field by using the cartography --
+        ! Index shift between the buffer and the local scalar field
+        proc_gap = ind_1Dtable + rece_gap(1,1)-1
+        gap = proc_gap*mesh_sc%N_proc(direction)
+        call remesh_buffer_to_scalar_pt(gs, j, k, ind_1Dtable, gap, min_size, &
+                & rece_carto, rece_buffer, scal, rece_pos(proc_gap))
+        missing_msg = missing_msg - 1
+    end do
+
+    ! ===== Free memory and communication buffers =====
+    ! -- Deallocate all fields --
+    deallocate(rece_pos)
+    deallocate(rece_buffer)
+    deallocate(r_request_sca)
+#ifndef BLOCKING_SEND
+    ! -- Check that the Isends are done --
+    allocate(s_status(MPI_STATUS_SIZE,1:nb_s))
+    call mpi_waitall(nb_s, s_request_sca, s_status, ierr)
+    deallocate(s_status)
+    ! -- Free all communication buffer and data --
+    deallocate(s_request_sca)
+#endif
+
+end subroutine AC_remesh_finalize
+
+
+end module advec_common_remesh
+!> @}
diff --git a/HySoP/src/scalesInterface/particles/advec_correction.f90 b/HySoP/src/scalesInterface/particles/advec_correction.f90
index 6b273c3dd..6549ff5b1 100644
--- a/HySoP/src/scalesInterface/particles/advec_correction.f90
+++ b/HySoP/src/scalesInterface/particles/advec_correction.f90
@@ -1,3 +1,4 @@
+!USEFORTEST advec
 !> @addtogroup part
 !! @{
 !------------------------------------------------------------------------------
@@ -64,17 +65,17 @@ subroutine AC_type_and_block_group(dt, dir, gp_s, ind_group, p_V, &
                 & bl_type, bl_tag)
 
     use mpi
+    use precision_tools ! defines the working precision (double or simple)
     use cart_topology   ! info about mesh and mpi topology
     use advec_variables ! contains info about solver parameters and others.
-    use precision_tools       ! define working precision_tools (double or simple)
 
-    real(WP), intent(in)                                        :: dt           ! time step
-    integer, intent(in)                                         :: dir
-    integer, dimension(2),intent(in)                            :: gp_s         ! groupe size
-    integer, dimension(2), intent(in)                           :: ind_group    ! group indice
-    real(WP), dimension(:,:,:), intent(in)                      :: p_V
-    logical,dimension(bl_nb(dir)+1,gp_s(1),gp_s(2)),intent(out) :: bl_type      ! is the particle block a center block or a left one ?
-    logical,dimension(bl_nb(dir),gp_s(1),gp_s(2)),intent(out)   :: bl_tag       ! indice of tagged particles
+    real(WP), intent(in)                      :: dt           ! time step
+    integer, intent(in)                       :: dir
+    integer, dimension(2),intent(in)          :: gp_s         ! group size
+    integer, dimension(2), intent(in)         :: ind_group    ! group index
+    real(WP), dimension(:,:,:), intent(in)    :: p_V
+    logical,dimension(:,:,:),intent(out)      :: bl_type      ! is the particle block a center block or a left one?
+    logical,dimension(:,:,:),intent(out)      :: bl_tag       ! tag of the particle blocks
 
     real(WP),dimension(bl_nb(dir)+1,gp_s(1),gp_s(2))            :: bl_lambdaMin ! for a particle, lamda = V*dt/dx ;  bl_lambdaMin = min of
                                                                                 ! lambda on a block (take also into account first following particle)
@@ -93,7 +94,7 @@ subroutine AC_type_and_block_group(dt, dir, gp_s, ind_group, p_V, &
     integer                                                     :: ierr         ! mpi error code
 
     ! ===== Initialisation =====
-    cfl = dt/d_sc(dir)
+    cfl = dt/mesh_sc%dx(dir)
     com_size = gp_s(1)*gp_s(2)
 
     ! ===== Compute bl_lambdaMin =====
@@ -101,9 +102,9 @@ subroutine AC_type_and_block_group(dt, dir, gp_s, ind_group, p_V, &
     ! Receive ghost value, ie value from neighbors boundaries.
     tag_table = compute_tag(ind_group, tag_part_tag_NP, dir)
     call mpi_Irecv(lambN(1,1), com_size, MPI_DOUBLE_PRECISION, &
-            & neighbors(dir,2), tag_table(1), D_comm(dir), rece_request(1), ierr)
+            & neighbors(dir,1), tag_table(1), D_comm(dir), rece_request(1), ierr)
     call mpi_Irecv(lambP(1,1), com_size, MPI_DOUBLE_PRECISION, &
-            &  neighbors(dir,1), tag_table(2), D_comm(dir), rece_request(2), ierr)
+            &  neighbors(dir,-1), tag_table(2), D_comm(dir), rece_request(2), ierr)
 
     ! -- For the first block (1/2) --
     ! The domain contains only its second half => exchange ghost with the previous processus
@@ -111,15 +112,15 @@ subroutine AC_type_and_block_group(dt, dir, gp_s, ind_group, p_V, &
     !tag_table = compute_tag(ind_group, tag_part_tag_NP, dir)   ! Tag table is already equals to this.
     ! Send message
     call mpi_ISsend(lambB(1,1), com_size, MPI_DOUBLE_PRECISION, &
-            & neighbors(dir,1), tag_table(1), D_comm(dir), send_request(1), ierr)
+            & neighbors(dir,-1), tag_table(1), D_comm(dir), send_request(1), ierr)
 
     ! -- For the last block (1/2) --
     ! The processus contains only its first half => exchange ghost with the next processus
     ind = bl_nb(dir) + 1
-    lambE = minval(p_V(N_proc(dir) - (bl_size/2)+1 :N_proc(dir),:,:),1)*cfl
+    lambE = minval(p_V(mesh_sc%N_proc(dir) - (bl_size/2)+1 :mesh_sc%N_proc(dir),:,:),1)*cfl
     ! Send message
     call mpi_ISsend(lambE(1,1), com_size, MPI_DOUBLE_PRECISION, &
-            & neighbors(dir,2), tag_table(2), D_comm(dir), send_request(2), ierr)
+            & neighbors(dir,1), tag_table(2), D_comm(dir), send_request(2), ierr)
 
     ! -- For the "middle" block --
     do ind = 2, bl_nb(dir)
@@ -200,7 +201,7 @@ subroutine AC_limitator_from_slopes(direction, gp_s, p_pos, &
     ! ===== Compute slope and limitator =====
     ! Van Leer limitator function (limit = limitator/8)
     ! -- For the "middle" and the "last" block --
-    do ind = 2, N_proc(direction)
+    do ind = 2, mesh_sc%N_proc(direction)
         where(deltaS(:,:,ind)/=0)
             afl = p_pos(ind,:,:)
             afl = afl - nint(afl)
@@ -224,13 +225,13 @@ subroutine AC_limitator_from_slopes(direction, gp_s, p_pos, &
     ! 1 - limit(1) - limitator at 1/2 is already compute on the previous mpi-rank (limit(N_proc+1) !)
     ! 2 - limit(2) - limitator at 1+1/2 requires deltaS(0) = scalar slope between scalar(0) and scalar(-1) which is already compute on previous rank
     ! Send these values
-    Sbuffer(1,:,:) = limit(N_proc(direction)+1,:,:)
-    Sbuffer(2,:,:) = deltaS(:,:,N_proc(direction))
+    Sbuffer(1,:,:) = limit(mesh_sc%N_proc(direction)+1,:,:)
+    Sbuffer(2,:,:) = deltaS(:,:,mesh_sc%N_proc(direction))
     call mpi_ISsend(Sbuffer(1,1,1), com_size, MPI_DOUBLE_PRECISION, &
-            & neighbors(direction,2), tag_mpi, D_comm(direction), send_request, ierr)
+            & neighbors(direction,1), tag_mpi, D_comm(direction), send_request, ierr)
     ! Receive it !
     call mpi_recv(Rbuffer(1,1,1), com_size, MPI_DOUBLE_PRECISION, &
-            &  neighbors(direction,1), tag_mpi, D_comm(direction),rece_status, ierr)
+            &  neighbors(direction,-1), tag_mpi, D_comm(direction),rece_status, ierr)
     ! Get limit(1) = limitator at 1/2
     limit(1,:,:) = Rbuffer(1,:,:)
     ! Get limit(2) = limitator at 1+1/2
diff --git a/HySoP/src/scalesInterface/particles/advec_remesh_Mprime.f90 b/HySoP/src/scalesInterface/particles/advec_remesh_Mprime.f90
index 60ee52d14..4041be611 100644
--- a/HySoP/src/scalesInterface/particles/advec_remesh_Mprime.f90
+++ b/HySoP/src/scalesInterface/particles/advec_remesh_Mprime.f90
@@ -1,3 +1,4 @@
+!USEFORTEST advec
 !> @addtogroup part
 
 !------------------------------------------------------------------------------
@@ -12,7 +13,7 @@
 !! @details
 !! It provides M'6 and M'8 remeshing formula.
 !!   These M' formula appear as only involving stability condition depending on
-!! velocity gradient rather than CFL number. Thus, they allow to use large
+!! velocity gradient rather than CFL number. Thus, they allow us to use large
 !! time-step. The stability constant is equal to 1 (ie the condition is
 !! dt < gradiend(velocity)) where the numerical gradient is computed with
 !! finite-difference scheme.
@@ -35,7 +36,7 @@
 
 module advec_remeshing_Mprime
 
-    use precision_tools
+    use structure_tools
     use advec_common_line
 
     implicit none
@@ -61,10 +62,15 @@ module advec_remeshing_Mprime
 !       end subroutine AC_remesh_Mprime
 !   end interface
 
+    ! ===== Public variable =====
+    !> To know wich diffusion coefficient to use.
+    integer, public                                     :: sc_remesh_ind
+    integer, public                                     :: current_dir = 1
+
     ! ===== Public procedures =====
     ! Wrapper to M' remeshing formula (actually pointer to the right subroutine)
-    procedure(AC_remesh_Mprime6_array), pointer, public ::  AC_remesh_Mprime_array  => null()   !> wrapper to M' remeshing formula - buffer are stored in classical array
-    procedure(AC_remesh_Mprime6_pter),  pointer, public ::  AC_remesh_Mprime_pter   => null()   !> wrapper to M' remeshing formula - buffer are stored via an array of pointer
+    procedure(AC_remesh_Mstar6_array), pointer, public ::  AC_remesh_Mprime_array  => null()   !> wrapper to M' remeshing formula - buffers are stored in a classical array
+    procedure(AC_remesh_Mstar6_pter),  pointer, public ::  AC_remesh_Mprime_pter   => null()   !> wrapper to M' remeshing formula - buffers are stored via an array of pointers
     ! To get the right "line remeshing" wrapper
     public                              :: AC_remesh_init_Mprime
     !----- M'4 remeshing formula -----
@@ -72,9 +78,9 @@ module advec_remeshing_Mprime
     public                              :: AC_remesh_Mprime4_array      ! use 4 grid point, 2 for each side of the particle.
     public                              :: AC_remesh_Mprime4_pter       ! use 4 grid point, 2 for each side of the particle.
     !----- M'6 remeshing formula -----
-    public                              :: AC_remesh_Mprime6    ! use 6 grid point, 3 for each side of the particle.
-    public                              :: AC_remesh_Mprime6_array      ! use 6 grid point, 3 for each side of the particle.
-    public                              :: AC_remesh_Mprime6_pter       ! use 6 grid point, 3 for each side of the particle.
+    public                              :: AC_remesh_Mstar6    ! use 6 grid points, 3 for each side of the particle.
+    public                              :: AC_remesh_Mstar6_array      ! use 6 grid points, 3 for each side of the particle.
+    public                              :: AC_remesh_Mstar6_pter       ! use 6 grid points, 3 for each side of the particle.
     !----- M'8 remeshing formula -----
     public                              :: AC_remesh_Mprime8    ! use 8 grid point, 4 for each side of the particle.
     public                              :: AC_remesh_Mprime8_array
@@ -86,11 +92,10 @@ module advec_remeshing_Mprime
     interface AC_remesh_Mprime4
         module procedure AC_remesh_Mprime4_pter, AC_remesh_Mprime4_array
     end interface AC_remesh_Mprime4
-
     ! -- M'6: array of real or of pointer --
-    interface AC_remesh_Mprime6
-        module procedure AC_remesh_Mprime6_pter, AC_remesh_Mprime6_array
-    end interface AC_remesh_Mprime6
+    interface AC_remesh_Mstar6
+        module procedure AC_remesh_Mstar6_pter, AC_remesh_Mstar6_array
+    end interface AC_remesh_Mstar6
 
     ! -- M'8: array of real or of pointer --
     interface AC_remesh_Mprime8
@@ -108,15 +113,21 @@ subroutine AC_remesh_init_Mprime()
     use advec_variables         ! solver context
 
     select case(trim(type_solv))
+    case ('d_M4')
+        AC_remesh_Mprime_array => AC_remesh_Mprime4_diff_array
+        AC_remesh_Mprime_pter  => AC_remesh_Mprime4_diff_pter
     case ('p_M4')
         AC_remesh_Mprime_array => AC_remesh_Mprime4_array
         AC_remesh_Mprime_pter  => AC_remesh_Mprime4_pter
     case ('p_M8')
         AC_remesh_Mprime_array => AC_remesh_Mprime8_array
         AC_remesh_Mprime_pter  => AC_remesh_Mprime8_pter
+    case ('p_L4')
+        AC_remesh_Mprime_array => AC_remesh_L4_4_array
+        AC_remesh_Mprime_pter  => AC_remesh_L4_4_pter
     case default
-        AC_remesh_Mprime_array => AC_remesh_Mprime6_array
-        AC_remesh_Mprime_pter  => AC_remesh_Mprime6_pter
+        AC_remesh_Mprime_array => AC_remesh_Mstar6_array
+        AC_remesh_Mprime_pter  => AC_remesh_Mstar6_pter
     end select
 
 end subroutine AC_remesh_init_Mprime
@@ -124,7 +135,9 @@ end subroutine AC_remesh_init_Mprime
 ! =========================================================================
 ! ============     Interpolation polynom used for remeshing    ============
 ! =========================================================================
+
 !> M'4 remeshing formula - version for array of real
+!! @author Chloe Mimeau, LJK
 !!      @param[in]       dir     = current direction
 !!      @param[in]       pos_adim= adimensionned particle position
 !!      @param[in]       sca     = scalar advected by the particle
@@ -160,19 +173,20 @@ subroutine AC_remesh_Mprime4_array(dir, pos_adim, sca, buffer)
     b0 = 1. - (bM+bP+bP2)
 
     ! remeshing
-    j1 = modulo(j0-2,N(dir))+1  ! j0-1
+    j1 = modulo(j0-2,mesh_sc%N(dir))+1  ! j0-1
     buffer(j1) = buffer(j1) + sca*bM
-    j1 = modulo(j0-1,N(dir))+1  ! j0
+    j1 = modulo(j0-1,mesh_sc%N(dir))+1  ! j0
     buffer(j1) = buffer(j1) + sca*b0
-    j1 = modulo(j0,N(dir))+1    ! j0+1
+    j1 = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
     buffer(j1) = buffer(j1) + sca*bP
-    j1 = modulo(j0+1,N(dir))+1  ! j0+2
+    j1 = modulo(j0+1,mesh_sc%N(dir))+1  ! j0+2
     buffer(j1) = buffer(j1) + sca*bP2
 
 end subroutine AC_remesh_Mprime4_array
 
 
 !> M'4 remeshing formula. - version for array of pointer
+!! @author Chloe Mimeau, LJK
 !!      @param[in]       pos_adim= adimensionned particle position
 !!      @param[in]       sca     = scalar advected by the particle
 !!      @param[in,out]   buffer  = temporaly remeshed scalar field
@@ -214,12 +228,120 @@ subroutine AC_remesh_Mprime4_pter(pos_adim, sca, buffer)
 end subroutine AC_remesh_Mprime4_pter
 
 
+!> M'4 remeshing formula with diffusion - version for array of real
+!! @author Jean-baptiste Lagaert, LEGI
+!! @details The diffusion correction uses the module variable sc_diff_dt_dx = (diffusivity*time step)/((space step)^2).
+!!      @param[in]       dir     = current direction
+!!      @param[in]       pos_adim= adimensionned particle position
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporarily remeshed scalar field
+subroutine AC_remesh_Mprime4_diff_array(dir, pos_adim, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    !Input/Output
+    integer, intent(in)                     :: dir
+    real(WP), intent(in)                    :: pos_adim, sca
+    real(WP), dimension(:), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: j0, j1                   ! indices of the nearest mesh points
+    real(WP)    :: bM, b0, bP, bP2          ! interpolation weights for the particle
+    real(WP)    :: y0                       ! adimensionned distance to mesh points
+    real(WP)    :: diff1, diff2             ! remeshing corrections that take the diffusion term into account
+
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - dble(j0))
+
+    ! Compute coefficient for diffusion part
+    diff1 = 1.5*(1.-0.5*4.*sc_diff_dt_dx(sc_remesh_ind,dir))
+    diff2 = 0.5*(1.-1.5*4.*sc_diff_dt_dx(sc_remesh_ind,dir))
+
+    ! Interpolation weights
+    !bM = .5*((2.-(y0+1))**2)*(diff1*(2.-(y0+1))/3.-diff2*(y0+1))
+    bM = (-1./6)*((y0-1.)**2)*(diff1*(y0-1.)+diff2*(3.*y0+3.))
+    !b0 =.5*((2.-y0)**2)*(diff1*(2-y0)/3.-diff2*y0)-((1.-y0)**2)*(2.*diff1*(1.-y0)/3.-2.*diff2*y0)
+    b0 =(y0**2)*((diff1*(0.5*y0-1.))+(diff2*(1.5*y0-2.))) + (diff1*2./3._WP)
+    !bP =.5*((2.-(1-y0))**2)*(diff1*(2-(1-y0))/3.-diff2*(1-y0))-((1.-(1-y0))**2)*(2.*diff1*(1.-(1-y0))/3.-2.*diff2*(1-y0))
+    bP = diff1*(y0*(y0*(0.5-0.5*y0)+0.5)+(1._WP/6._WP))+diff2*(y0*(y0*(2.5-1.5*y0)-0.5)-0.5)
+    !bP2= .5*((2.-(2-y0))**2)*(diff1*(2.-(2-y0))/3.-diff2*(2-y0))
+    bP2 = 0.5_WP*(y0**2)*((1._WP/3._WP)*diff1*y0 - diff2*(2.-y0))
+    !bP = 1._WP - (bM + b0 + bP2)
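+    ! Sanity check (from the formulas above): the four weights sum to
+    ! diff1 - diff2 = 1 for any y0 (mass conservation) and, for
+    ! sc_diff_dt_dx = 0 (diff1 = 1.5, diff2 = 0.5), they reduce to the
+    ! standard M'4 weights, e.g. bM = -0.5*y0*(1.-y0)**2.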
+
+
+    ! remeshing
+    j1 = modulo(j0-2,mesh_sc%N(dir))+1  ! j0-1
+    buffer(j1) = buffer(j1) + sca*bM
+    j1 = modulo(j0-1,mesh_sc%N(dir))+1  ! j0
+    buffer(j1) = buffer(j1) + sca*b0
+    j1 = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
+    buffer(j1) = buffer(j1) + sca*bP
+    j1 = modulo(j0+1,mesh_sc%N(dir))+1  ! j0+2
+    buffer(j1) = buffer(j1) + sca*bP2
+
+end subroutine AC_remesh_Mprime4_diff_array
+
+
+!> M'4 remeshing formula with diffusion - version for array of pointer.
+!! @author Jean-baptiste Lagaert, LEGI
+!! @details The diffusion correction uses the module variable sc_diff_dt_dx along the direction current_dir.
+!!      @param[in]       pos_adim= adimensionned particle position
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporarily remeshed scalar field
+subroutine AC_remesh_Mprime4_diff_pter(pos_adim, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    !Input/Output
+    real(WP), intent(in)                                        :: pos_adim, sca
+    type(real_pter), dimension(:), intent(inout)                :: buffer
+    ! Other local variables
+    integer     :: j0                       ! index of the nearest mesh point
+    real(WP)    :: bM, b0, bP, bP2          ! interpolation weights for the particle
+    real(WP)    :: y0                       ! adimensionned distance to mesh points
+    real(WP)    :: diff1, diff2             ! remeshing corrections that take the diffusion term into account
+
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - dble(j0))
+
+    ! Compute coefficient for diffusion part
+    diff1 = 1.5*(1.-0.5*4.*sc_diff_dt_dx(sc_remesh_ind,current_dir))
+    diff2 = 0.5*(1.-1.5*4.*sc_diff_dt_dx(sc_remesh_ind,current_dir))
+
+    ! Interpolation weights
+    !bM = .5*((2.-(y0+1))**2)*(diff1*(2.-(y0+1))/3.-diff2*(y0+1))
+    bM = (-1./6)*((y0-1.)**2)*(diff1*(y0-1.)+diff2*(3.*y0+3.))
+    !b0 =.5*((2.-y0)**2)*(diff1*(2-y0)/3.-diff2*y0)-((1.-y0)**2)*(2.*diff1*(1.-y0)/3.-2.*diff2*y0)
+    b0 =(y0**2)*((diff1*(0.5*y0-1.))+(diff2*(1.5*y0-2.))) + (diff1*2./3._WP)
+    !bP =.5*((2.-(1-y0))**2)*(diff1*(2-(1-y0))/3.-diff2*(1-y0))-((1.-(1-y0))**2)*(2.*diff1*(1.-(1-y0))/3.-2.*diff2*(1-y0))
+    bP = diff1*(y0*(y0*(0.5-0.5*y0)+0.5)+(1._WP/6._WP))+diff2*(y0*(y0*(2.5-1.5*y0)-0.5)-0.5)
+    !bP2= .5*((2.-(2-y0))**2)*(diff1*(2.-(2-y0))/3.-diff2*(2-y0))
+    bP2 = 0.5_WP*(y0**2)*((1._WP/3._WP)*diff1*y0 - diff2*(2.-y0))
+    !bP = 1._WP - (bM + b0 + bP2)
+
+    ! remeshing
+    buffer(j0-1)%pter = buffer(j0-1)%pter + sca*bM
+    buffer(j0  )%pter = buffer(j0  )%pter + sca*b0
+    buffer(j0+1)%pter = buffer(j0+1)%pter + sca*bP
+    buffer(j0+2)%pter = buffer(j0+2)%pter + sca*bP2
+
+end subroutine AC_remesh_Mprime4_diff_pter
+
+
 !> M'6 remeshing formula - version for array of real
+!! @author Jean-Baptiste Lagaert, LEGI/LJK
 !!      @param[in]       dir     = current direction
 !!      @param[in]       pos_adim= adimensionned particle position
 !!      @param[in]       sca     = scalar advected by the particle
 !!      @param[in,out]   buffer  = temporaly remeshed scalar field
-subroutine AC_remesh_Mprime6_array(dir, pos_adim, sca, buffer)
+subroutine AC_remesh_Mstar6_array(dir, pos_adim, sca, buffer)
 
     use cart_topology
     use advec_variables ! contains info about solver parameters and others.
@@ -255,28 +377,29 @@ subroutine AC_remesh_Mprime6_array(dir, pos_adim, sca, buffer)
     b0 = 1. - (bM2+bM+bP+bP2+bP3)
 
     ! remeshing
-    j1 = modulo(j0-3,N(dir))+1  ! j0-2
+    j1 = modulo(j0-3,mesh_sc%N(dir))+1  ! j0-2
     buffer(j1) = buffer(j1) + sca*bM2
-    j1 = modulo(j0-2,N(dir))+1  ! j0-1
+    j1 = modulo(j0-2,mesh_sc%N(dir))+1  ! j0-1
     buffer(j1) = buffer(j1) + sca*bM
-    j1 = modulo(j0-1,N(dir))+1  ! j0
+    j1 = modulo(j0-1,mesh_sc%N(dir))+1  ! j0
     buffer(j1) = buffer(j1) + sca*b0
-    j1 = modulo(j0,N(dir))+1    ! j0+1
+    j1 = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
     buffer(j1) = buffer(j1) + sca*bP
-    j1 = modulo(j0+1,N(dir))+1  ! j0+2
+    j1 = modulo(j0+1,mesh_sc%N(dir))+1  ! j0+2
     buffer(j1) = buffer(j1) + sca*bP2
-    j1 = modulo(j0+2,N(dir))+1  ! j0+3
+    j1 = modulo(j0+2,mesh_sc%N(dir))+1  ! j0+3
     buffer(j1) = buffer(j1) + sca*bP3
 
-end subroutine AC_remesh_Mprime6_array
+end subroutine AC_remesh_Mstar6_array
 
 
 !> M'6 remeshing formula (order is more than 2, JM Ethancelin is working on
 !! determining order). - version for array of pointer
+!! @author Jean-Baptiste Lagaert, LEGI/LJK
 !!      @param[in]       pos_adim= adimensionned particle position
 !!      @param[in]       sca     = scalar advected by the particle
 !!      @param[in,out]   buffer  = temporaly remeshed scalar field
-subroutine AC_remesh_Mprime6_pter(pos_adim, sca, buffer)
+subroutine AC_remesh_Mstar6_pter(pos_adim, sca, buffer)
 
     use cart_topology
     use advec_variables ! contains info about solver parameters and others.
@@ -318,10 +441,107 @@ subroutine AC_remesh_Mprime6_pter(pos_adim, sca, buffer)
     buffer(j0+2)%pter = buffer(j0+2)%pter + sca*bP2
     buffer(j0+3)%pter = buffer(j0+3)%pter + sca*bP3
 
-end subroutine AC_remesh_Mprime6_pter
+end subroutine AC_remesh_Mstar6_pter
+
+
+!> Lambda(4,4) remeshing formula (without correction), order 4 everywhere - version for array of real
+!! @author Jean-Baptiste Lagaert, LEGI/LJK
+!!      @param[in]       dir     = current direction
+!!      @param[in]       pos_adim= adimensionned particle position
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporarily remeshed scalar field
+subroutine AC_remesh_L4_4_array(dir, pos_adim, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    !Input/Output
+    integer, intent(in)                     :: dir
+    real(WP), intent(in)                    :: pos_adim, sca
+    real(WP), dimension(:), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: j0, j1                   ! indices of the nearest mesh points
+    real(WP)    :: bM, bM2, b0, bP, bP2, bP3! interpolation weights for the particle
+    real(WP)    :: y0                       ! adimensionned distance to mesh points
+
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - dble(j0))
+
+    ! Interpolation weights
+    bM2 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(-46. * y0 + 207.) - 354.) + 273.) - 80.) + 1.) - 2.)- 1.) + 2.)) / 24.
+    bM  = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(230. * y0 - 1035.) +1770.) - 1365.) + 400.) - 4.) + 4.) + 16.) - 16.)) / 24.
+    b0  = (y0* y0*(y0*y0* (y0*(y0*(y0*(y0*(-460.* y0 + 2070.) - 3540.) + 2730.) - 800.) + 6.) - 30.)+ 24.) / 24.
+    bP  = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(460. * y0 - 2070.) + 3540.) - 2730.) + 800.) - 4.) - 4.) + 16.) + 16.)) / 24.
+    !bP2 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0 * (-230. * y0 + 1035.) - 1770.) + 1365.) - 400.) + 1.) + 2.) - 1.) - 2.)) / 24.
+    bP3 = (y0*y0*y0*y0*y0*(y0*(y0 * (y0 * (46. * y0 - 207.) + 354.) - 273.) + 80.)) / 24.
+    bP2 = 1. - (bM2+bM+bP+b0+bP3)
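+    ! Sanity check (from the formulas above): the six weights sum to 1 by
+    ! construction of bP2, and at y0 = 0 they reduce to (0,0,1,0,0,0), so the
+    ! remeshing is exact when a particle sits exactly on a grid point.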
+
+    ! remeshing
+    j1 = modulo(j0-3,mesh_sc%N(dir))+1  ! j0-2
+    buffer(j1) = buffer(j1) + sca*bM2
+    j1 = modulo(j0-2,mesh_sc%N(dir))+1  ! j0-1
+    buffer(j1) = buffer(j1) + sca*bM
+    j1 = modulo(j0-1,mesh_sc%N(dir))+1  ! j0
+    buffer(j1) = buffer(j1) + sca*b0
+    j1 = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
+    buffer(j1) = buffer(j1) + sca*bP
+    j1 = modulo(j0+1,mesh_sc%N(dir))+1  ! j0+2
+    buffer(j1) = buffer(j1) + sca*bP2
+    j1 = modulo(j0+2,mesh_sc%N(dir))+1  ! j0+3
+    buffer(j1) = buffer(j1) + sca*bP3
+
+end subroutine AC_remesh_L4_4_array
+
+
+!> Lambda(4,4) uncorrected remeshing formula (order 4 everywhere) - version for array of pointer
+!! @author Jean-Baptiste Lagaert, LEGI/LJK
+!!      @param[in]       pos_adim= adimensionned particle position
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporarily remeshed scalar field
+subroutine AC_remesh_L4_4_pter(pos_adim, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    !Input/Output
+    real(WP), intent(in)                                        :: pos_adim, sca
+    type(real_pter), dimension(:), intent(inout)                :: buffer
+    ! Other local variables
+    integer     :: j0                       ! index of the nearest mesh point
+    real(WP)    :: bM, bM2, b0, bP, bP2, bP3! interpolation weights for the particle
+    real(WP)    :: y0                       ! adimensionned distance to mesh points
+
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - dble(j0))
+
+    ! Interpolation weights
+    bM2 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(-46. * y0 + 207.) - 354.) + 273.) - 80.) + 1.) - 2.)- 1.) + 2.)) / 24.
+    bM  = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(230. * y0 - 1035.) +1770.) - 1365.) + 400.) - 4.) + 4.) + 16.) - 16.)) / 24.
+    b0  = (y0* y0*(y0*y0* (y0*(y0*(y0*(y0*(-460.* y0 + 2070.) - 3540.) + 2730.) - 800.) + 6.) - 30.)+ 24.) / 24.
+    bP  = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(460. * y0 - 2070.) + 3540.) - 2730.) + 800.) - 4.) - 4.) + 16.) + 16.)) / 24.
+    !bP2 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0 * (-230. * y0 + 1035.) - 1770.) + 1365.) - 400.) + 1.) + 2.) - 1.) - 2.)) / 24.
+    bP3 = (y0*y0*y0*y0*y0*(y0*(y0 * (y0 * (46. * y0 - 207.) + 354.) - 273.) + 80.)) / 24.
+    bP2 = 1. - (bM2+bM+bP+b0+bP3)
+
+    ! remeshing
+    buffer(j0-2)%pter = buffer(j0-2)%pter + sca*bM2
+    buffer(j0-1)%pter = buffer(j0-1)%pter + sca*bM
+    buffer(j0  )%pter = buffer(j0  )%pter + sca*b0
+    buffer(j0+1)%pter = buffer(j0+1)%pter + sca*bP
+    buffer(j0+2)%pter = buffer(j0+2)%pter + sca*bP2
+    buffer(j0+3)%pter = buffer(j0+3)%pter + sca*bP3
+
+end subroutine AC_remesh_L4_4_pter
 
 
 !> M'8 remeshing formula - version for array of pointer.
+!! @author Jean-Baptiste Lagaert, LEGI/LJK
 !!      @param[in]       dir     = current direction
 !!      @param[in]       pos_adim= adimensionned particle position
 !!      @param[in]       sca     = scalar advected by the particle
@@ -359,7 +579,7 @@ subroutine AC_remesh_Mprime8_array(dir, pos_adim, sca, buffer)
     bP3=y0*(y0*(y0*(y0*(y0*(y0*(-y0/48. + 3./32.) - 1./12.)       &
         & - 1./32.) - 1./48.) + 1./96.) + 1./60.) + 17./3360.
     ! bM2=(2-y0)**7/2688.-(y0+2)*(2-y0)**6/640.+(y0+2)**2*(2-y0)**5/960
-    !     -(1-y0)**7/336+(y0+2)*(1-y0)**6/80.-(y0+2)**2*(1-y0)**5/120.
+    !     -xx2**7/336+(y0+2)*xx2**6/80.-(y0+2)**2*xx2**5/120.
     bM2=y0*(y0*(y0*(y0*(y0*(y0*(y0/48. - 5./96.) - 1./24.)        &
         & + 11./48.) - 1./6.) - 5./48.) + 3./20.) - 17./560.
     ! bP2=(y0+2)**7/2688.-(2-y0)*(y0+2)**6/640.+(2-y0)**2*(y0+2)**5/960
@@ -389,27 +609,28 @@ subroutine AC_remesh_Mprime8_array(dir, pos_adim, sca, buffer)
     bP = 1. - bM3 - bM2 - bM - b0 - bP2 - bP3 - bP4
 
     ! remeshing
-    j1 = modulo(j0-4,N(dir))+1  ! j0-3
+    j1 = modulo(j0-4,mesh_sc%N(dir))+1  ! j0-3
     buffer(j1) = buffer(j1) + sca*bM3
-    j1 = modulo(j0-3,N(dir))+1  ! j0-2
+    j1 = modulo(j0-3,mesh_sc%N(dir))+1  ! j0-2
     buffer(j1) = buffer(j1) + sca*bM2
-    j1 = modulo(j0-2,N(dir))+1  ! j0-1
+    j1 = modulo(j0-2,mesh_sc%N(dir))+1  ! j0-1
     buffer(j1) = buffer(j1) + sca*bM
-    j1 = modulo(j0-1,N(dir))+1  ! j0
+    j1 = modulo(j0-1,mesh_sc%N(dir))+1  ! j0
     buffer(j1) = buffer(j1) + sca*b0
-    j1 = modulo(j0,N(dir))+1    ! j0+1
+    j1 = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
     buffer(j1) = buffer(j1) + sca*bP
-    j1 = modulo(j0+1,N(dir))+1  ! j0+2
+    j1 = modulo(j0+1,mesh_sc%N(dir))+1  ! j0+2
     buffer(j1) = buffer(j1) + sca*bP2
-    j1 = modulo(j0+2,N(dir))+1  ! j0+3
+    j1 = modulo(j0+2,mesh_sc%N(dir))+1  ! j0+3
     buffer(j1) = buffer(j1) + sca*bP3
-    j1 = modulo(j0+3,N(dir))+1  ! j0+4
+    j1 = modulo(j0+3,mesh_sc%N(dir))+1  ! j0+4
     buffer(j1) = buffer(j1) + sca*bP4
 
 end subroutine AC_remesh_Mprime8_array
 
 
 !> M'8 remeshing formula - version for array of pointer.
+!! @author Jean-Baptiste Lagaert, LEGI/LJK
 !!      @param[in]       dir     = current direction
 !!      @param[in]       pos_adim= adimensionned particle position
 !!      @param[in]       sca     = scalar advected by the particle
@@ -446,7 +667,7 @@ subroutine AC_remesh_Mprime8_pter(pos_adim, sca, buffer)
     bP3=y0*(y0*(y0*(y0*(y0*(y0*(-y0/48. + 3./32.) - 1./12.)       &
         & - 1./32.) - 1./48.) + 1./96.) + 1./60.) + 17./3360.
     ! bM2=(2-y0)**7/2688.-(y0+2)*(2-y0)**6/640.+(y0+2)**2*(2-y0)**5/960
-    !     -(1-y0)**7/336+(y0+2)*(1-y0)**6/80.-(y0+2)**2*(1-y0)**5/120.
+    !     -xx2**7/336+(y0+2)*xx2**6/80.-(y0+2)**2*xx2**5/120.
     bM2=y0*(y0*(y0*(y0*(y0*(y0*(y0/48. - 5./96.) - 1./24.)        &
         & + 11./48.) - 1./6.) - 5./48.) + 3./20.) - 17./560.
     ! bP2=(y0+2)**7/2688.-(2-y0)*(y0+2)**6/640.+(2-y0)**2*(y0+2)**5/960
diff --git a/HySoP/src/scalesInterface/particles/advec_remesh_lambda.f90 b/HySoP/src/scalesInterface/particles/advec_remesh_lambda.f90
index 8f68902e2..400375f38 100644
--- a/HySoP/src/scalesInterface/particles/advec_remesh_lambda.f90
+++ b/HySoP/src/scalesInterface/particles/advec_remesh_lambda.f90
@@ -1,3 +1,4 @@
+!USEFORTEST advec
 !> @addtogroup part
 
 !------------------------------------------------------------------------------
@@ -152,7 +153,6 @@ module advec_remeshing_lambda
     abstract interface
         subroutine AC_remesh_line_pter(direction, p_pos_adim, scal1D, bl_type, bl_tag, ind_min, buffer)
             use structure_tools
-            ! use precision_tools ! already include in structure tools
             use advec_variables
 
             implicit none
@@ -250,11 +250,11 @@ subroutine AC_remesh_lambda2corrected_pter(direction, p_pos_adim, scal1D, bl_typ
     ! Other local variables
     integer                                     :: bl_ind       ! indice of the current "block end".
     integer                                     :: p_ind        ! indice of the current particle
-    real(WP), dimension(N_proc(direction))      :: pos_translat ! translation of p_pos_adim as array indice are now starting from 1 and not ind_min
+    real(WP), dimension(mesh_sc%N_proc(direction))      :: pos_translat ! translation of p_pos_adim, as array indices now start from 1 and not from ind_min
 
     pos_translat = p_pos_adim - ind_min + 1
 
-    do p_ind = 1, N_proc(direction), bl_size
+    do p_ind = 1, mesh_sc%N_proc(direction), bl_size
         bl_ind = p_ind/bl_size + 1
         if (bl_tag(bl_ind)) then
             ! Tag case
@@ -302,7 +302,7 @@ end subroutine AC_remesh_lambda2corrected_pter
 !! tagged too.
 !! The following algorithm is write for block of minimal size.
 !! @author = Jean-Baptiste Lagaert, LEGI/Ljk
-subroutine AC_remesh_lambda2corrected_array(direction, p_pos_adim, scal1d, bl_type, bl_tag, remesh_buffer)
+subroutine ac_remesh_lambda2corrected_array(direction, p_pos_adim, scal1d, bl_type, bl_tag, remesh_buffer)
 
     use cart_topology   ! description of mesh and of mpi topology
     use advec_variables ! contains info about solver parameters and others.
@@ -310,7 +310,7 @@ subroutine AC_remesh_lambda2corrected_array(direction, p_pos_adim, scal1d, bl_ty
     ! input/output
     integer, intent(in)                                 :: direction
     real(wp), dimension(:), intent(in)                  :: p_pos_adim
-    real(wp), dimension(n_proc(direction)), intent(in)  :: scal1d
+    real(wp), dimension(mesh_sc%N_proc(direction)), intent(in)  :: scal1d
     logical, dimension(:), intent(in)                   :: bl_type
     logical, dimension(:), intent(in)                   :: bl_tag
     real(wp), dimension(:), intent(inout)               :: remesh_buffer
@@ -318,7 +318,7 @@ subroutine AC_remesh_lambda2corrected_array(direction, p_pos_adim, scal1d, bl_ty
     integer     :: bl_ind                               ! indice of the current "block end".
     integer     :: p_ind                                ! indice of the current particle
 
-    do p_ind = 1, N_proc(direction), bl_size
+    do p_ind = 1, mesh_sc%N_proc(direction), bl_size
         bl_ind = p_ind/bl_size + 1
         if (bl_tag(bl_ind)) then
             ! Tag case
@@ -331,13 +331,11 @@ subroutine AC_remesh_lambda2corrected_array(direction, p_pos_adim, scal1d, bl_ty
                 !end if
                 ! XXX Debug - end
             if (bl_type(bl_ind)) then
-               ! tagged, the first particle belong to a centered block and the last to left block.
-               call AC_remesh_tag_CL(direction, p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), &
-                    scal1D(p_ind+1), remesh_buffer)
+                ! tagged, the first particle belong to a centered block and the last to left block.
+                call AC_remesh_tag_CL(direction, p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), remesh_buffer)
             else
                 ! tagged, the first particle belong to a left block and the last to centered block.
-                call AC_remesh_tag_LC(direction, p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), &
-                     scal1D(p_ind+1), remesh_buffer)
+                call AC_remesh_tag_LC(direction, p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), remesh_buffer)
             end if
         else
             ! First particle
@@ -375,7 +373,7 @@ subroutine AC_remesh_lambda4corrected_array(direction, p_pos_adim, scal1D, bl_ty
     ! Input/Output
     integer, intent(in)                                 :: direction
     real(WP), dimension(:), intent(in)                  :: p_pos_adim
-    real(WP), dimension(N_proc(direction)), intent(in)  :: scal1D
+    real(WP), dimension(mesh_sc%N_proc(direction)), intent(in)  :: scal1D
     logical, dimension(:), intent(in)                   :: bl_type
     logical, dimension(:), intent(in)                   :: bl_tag
     real(WP), dimension(:), intent(inout)               :: remesh_buffer
@@ -383,7 +381,7 @@ subroutine AC_remesh_lambda4corrected_array(direction, p_pos_adim, scal1D, bl_ty
     integer     :: bl_ind                               ! indice of the current "block end".
     integer     :: p_ind                                ! indice of the current particle
 
-    do p_ind = 1, N_proc(direction), bl_size
+    do p_ind = 1, mesh_sc%N_proc(direction), bl_size
         bl_ind = p_ind/bl_size + 1
         if (bl_tag(bl_ind)) then
             ! Tagged case
@@ -452,11 +450,11 @@ subroutine AC_remesh_lambda4corrected_pter(direction, p_pos_adim, scal1D, bl_typ
     ! Other local variables
     integer     :: bl_ind                               ! indice of the current "block end".
     integer     :: p_ind                                ! indice of the current particle
-    real(WP), dimension(N_proc(direction))      :: pos_translat ! translation of p_pos_adim as array indice are now starting from 1 and not ind_min
+    real(WP), dimension(mesh_sc%N_proc(direction))      :: pos_translat ! translation of p_pos_adim, as array indices now start from 1 and not from ind_min
 
     pos_translat = p_pos_adim - ind_min + 1
 
-    do p_ind = 1, N_proc(direction), bl_size
+    do p_ind = 1, mesh_sc%N_proc(direction), bl_size
         bl_ind = p_ind/bl_size + 1
         if (bl_tag(bl_ind)) then
             ! Tagged case
@@ -532,11 +530,11 @@ subroutine AC_remesh_lambda2limited_pter(direction, p_pos_adim, scal1D, bl_type,
     ! Other local variables
     integer                                     :: bl_ind       ! indice of the current "block end".
     integer                                     :: p_ind        ! indice of the current particle
-    real(WP), dimension(N_proc(direction))      :: pos_translat ! translation of p_pos_adim as array indice are now starting from 1 and not ind_min
+    real(WP), dimension(mesh_sc%N_proc(direction))      :: pos_translat ! translation of p_pos_adim, as array indices now start from 1 and not from ind_min
 
     pos_translat = p_pos_adim - ind_min + 1
 
-    do p_ind = 1, N_proc(direction), bl_size
+    do p_ind = 1, mesh_sc%N_proc(direction), bl_size
         bl_ind = p_ind/bl_size + 1
         if (bl_tag(bl_ind)) then
             ! Tag case
@@ -599,8 +597,8 @@ subroutine AC_remesh_lambda2limited_array(direction, p_pos_adim, scal1d, bl_type
     ! input/output
     integer, intent(in)                                 :: direction
     real(wp), dimension(:), intent(in)                  :: p_pos_adim
-    real(wp), dimension(n_proc(direction)), intent(in)  :: scal1d
-    real(WP), dimension(:), intent(in)                          :: limit
+    real(wp), dimension(:), intent(in)                  :: scal1d
+    real(WP), dimension(:), intent(in)                  :: limit
     logical, dimension(:), intent(in)                   :: bl_type
     logical, dimension(:), intent(in)                   :: bl_tag
     real(wp), dimension(:), intent(inout)               :: remesh_buffer
@@ -608,7 +606,7 @@ subroutine AC_remesh_lambda2limited_array(direction, p_pos_adim, scal1d, bl_type
     integer     :: bl_ind                               ! indice of the current "block end".
     integer     :: p_ind                                ! indice of the current particle
 
-    do p_ind = 1, N_proc(direction), bl_size
+    do p_ind = 1, mesh_sc%N_proc(direction), bl_size
         bl_ind = p_ind/bl_size + 1
         if (bl_tag(bl_ind)) then
             ! Tag case
@@ -631,11 +629,9 @@ subroutine AC_remesh_lambda2limited_array(direction, p_pos_adim, scal1d, bl_type
             end if
         else
             ! First particle
-            call AC_remesh_limitO2(direction, p_pos_adim(p_ind),scal1D(p_ind), bl_type(bl_ind), &
-                 limit(p_ind:p_ind+1), remesh_buffer)
+            call AC_remesh_limitO2(direction, p_pos_adim(p_ind),scal1D(p_ind), bl_type(bl_ind), limit(p_ind:p_ind+1), remesh_buffer)
             ! Second particle is remeshed with left formula
-            call AC_remesh_limitO2(direction, p_pos_adim(p_ind+1),scal1D(p_ind+1), bl_type(bl_ind+1), &
-                 limit(p_ind+1:p_ind+2), remesh_buffer)
+            call AC_remesh_limitO2(direction, p_pos_adim(p_ind+1),scal1D(p_ind+1), bl_type(bl_ind+1), limit(p_ind+1:p_ind+2), remesh_buffer)
         end if
     end do
 
@@ -692,11 +688,11 @@ subroutine AC_remesh_O2_array(dir, pos_adim, sca, bl_type, buffer)
     bP=1. - (b0+bM)
 
     ! remeshing
-    j1 = modulo(j0-2,N(dir))+1 ! j0-1
+    j1 = modulo(j0-2,mesh_sc%N(dir))+1 ! j0-1
     buffer(j1) = buffer(j1) + bM*sca
-    j1 = modulo(j0-1,N(dir))+1 ! j0
+    j1 = modulo(j0-1,mesh_sc%N(dir))+1 ! j0
     buffer(j1) = buffer(j1) + b0*sca
-    j1 = modulo(j0,N(dir))+1   ! j0+1
+    j1 = modulo(j0,mesh_sc%N(dir))+1   ! j0+1
     buffer(j1) = buffer(j1) + bP*sca
 
 end subroutine AC_remesh_O2_array
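+
+! Note on the periodic wrap used above (illustration, assuming mesh_sc%N(dir) = 8):
+! Fortran's modulo follows the sign of its second argument, so modulo(j0-2,8)+1
+! maps j0 = 1 to index 8 (the left neighbour wraps to the last grid point) and
+! modulo(j0,8)+1 maps j0 = 8 to index 1; the three weights bM, b0 and bP, which
+! sum to 1 by construction, are therefore always deposited inside the buffer.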
@@ -792,9 +788,9 @@ subroutine AC_remesh_tag_CL_array(dir, pos_adim, sca, posP_ad, scaP, buffer)
     b0=1.-bP
 
     ! Remeshing
-    jM = modulo(j0-2,N(dir))+1  ! j0-1
-    jP = modulo(j0,N(dir))+1    ! j0+1
-    j0 = modulo(j0-1,N(dir))+1  ! j0
+    jM = modulo(j0-2,mesh_sc%N(dir))+1  ! j0-1
+    jP = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
+    j0 = modulo(j0-1,mesh_sc%N(dir))+1  ! j0
     buffer(jM)=buffer(jM)+aM*sca
     buffer(j0)=buffer(j0)+a0*sca+b0*scaP
     buffer(jP)=buffer(jP)+bP*scaP
@@ -913,11 +909,11 @@ subroutine AC_remesh_tag_LC_array(dir, pos_adim, sca, posP_ad, scaP, buffer)
     bP3=b0
 
     ! Remeshing
-    jM = modulo(j0-2,N(dir))+1  ! j0-1
-    jP = modulo(j0,N(dir))+1    ! j0+1
-    jP2= modulo(j0+1,N(dir))+1  ! j0+2
-    jP3= modulo(j0+2,N(dir))+1  ! j0+3
-    j0 = modulo(j0-1,N(dir))+1  ! j0
+    jM = modulo(j0-2,mesh_sc%N(dir))+1  ! j0-1
+    jP = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
+    jP2= modulo(j0+1,mesh_sc%N(dir))+1  ! j0+2
+    jP3= modulo(j0+2,mesh_sc%N(dir))+1  ! j0+3
+    j0 = modulo(j0-1,mesh_sc%N(dir))+1  ! j0
     buffer(jM)= buffer(jM)+aM*sca
     buffer(j0)= buffer(j0)+a0*sca+b0*scaP
     buffer(jP)= buffer(jP)+aP*sca+bP*scaP
@@ -1045,11 +1041,11 @@ subroutine AC_remesh_limitO2_array(dir, pos_adim, sca, bl_type, limit, buffer)
     bP=1. - (b0+bM)
 
     ! remeshing
-    j1 = modulo(j0-2,N(dir))+1 ! j0-1
+    j1 = modulo(j0-2,mesh_sc%N(dir))+1 ! j0-1
     buffer(j1) = buffer(j1) + bM*sca
-    j1 = modulo(j0-1,N(dir))+1 ! j0
+    j1 = modulo(j0-1,mesh_sc%N(dir))+1 ! j0
     buffer(j1) = buffer(j1) + b0*sca
-    j1 = modulo(j0,N(dir))+1   ! j0+1
+    j1 = modulo(j0,mesh_sc%N(dir))+1   ! j0+1
     buffer(j1) = buffer(j1) + bP*sca
 
 end subroutine AC_remesh_limitO2_array
@@ -1159,9 +1155,9 @@ subroutine AC_remesh_limitO2_tag_CL_array(dir, pos_adim, sca, posP_ad, scaP, lim
     b0=1.-bP
 
     ! Remeshing
-    jM = modulo(j0-2,N(dir))+1  ! j0-1
-    jP = modulo(j0,N(dir))+1    ! j0+1
-    j0 = modulo(j0-1,N(dir))+1  ! j0
+    jM = modulo(j0-2,mesh_sc%N(dir))+1  ! j0-1
+    jP = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
+    j0 = modulo(j0-1,mesh_sc%N(dir))+1  ! j0
     buffer(jM)=buffer(jM)+aM*sca
     buffer(j0)=buffer(j0)+a0*sca+b0*scaP
     buffer(jP)=buffer(jP)+bP*scaP
@@ -1287,11 +1283,11 @@ subroutine AC_remesh_limitO2_tag_LC_array(dir, pos_adim, sca, posP_ad, scaP, lim
     b0 = 1._WP - bP - bP2 - bP3
 
     ! Remeshing
-    jM = modulo(j0-2,N(dir))+1  ! j0-1
-    jP = modulo(j0,N(dir))+1    ! j0+1
-    jP2= modulo(j0+1,N(dir))+1  ! j0+2
-    jP3= modulo(j0+2,N(dir))+1  ! j0+3
-    j0 = modulo(j0-1,N(dir))+1  ! j0
+    jM = modulo(j0-2,mesh_sc%N(dir))+1  ! j0-1
+    jP = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
+    jP2= modulo(j0+1,mesh_sc%N(dir))+1  ! j0+2
+    jP3= modulo(j0+2,mesh_sc%N(dir))+1  ! j0+3
+    j0 = modulo(j0-1,mesh_sc%N(dir))+1  ! j0
     buffer(jM)= buffer(jM)  +aM *sca
     buffer(j0)= buffer(j0)  +a0 *sca+b0 *scaP
     buffer(jP)= buffer(jP)  +aP *sca+bP *scaP
@@ -1411,15 +1407,15 @@ subroutine AC_remesh_O4_left_array(dir, pos_adim, sca, buffer)
     b0 = 1. -(bM2+bM+bP+bP2)
 
     ! remeshing
-    j1 = modulo(j0-3,N(dir))+1  ! j0-2
+    j1 = modulo(j0-3,mesh_sc%N(dir))+1  ! j0-2
     buffer(j1) = buffer(j1) + bM2*sca
-    j1 = modulo(j0-2,N(dir))+1  ! j0-1
+    j1 = modulo(j0-2,mesh_sc%N(dir))+1  ! j0-1
     buffer(j1) = buffer(j1) + bM*sca
-    j1 = modulo(j0-1,N(dir))+1  ! j0
+    j1 = modulo(j0-1,mesh_sc%N(dir))+1  ! j0
     buffer(j1) = buffer(j1) + b0*sca
-    j1 = modulo(j0,N(dir))+1    ! j0+1
+    j1 = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
     buffer(j1) = buffer(j1) + bP*sca
-    j1 = modulo(j0+1,N(dir))+1  ! j0+2
+    j1 = modulo(j0+1,mesh_sc%N(dir))+1  ! j0+2
     buffer(j1) = buffer(j1) + bP2*sca
 
 end subroutine AC_remesh_O4_left_array
@@ -1505,15 +1501,15 @@ subroutine AC_remesh_O4_center_array(dir, pos_adim, sca, buffer)
     b0 = 1._WP -(bM2+bM+bP+bP2)
 
     ! remeshing
-    j1 = modulo(j0-3,N(dir))+1  ! j0-2
+    j1 = modulo(j0-3,mesh_sc%N(dir))+1  ! j0-2
     buffer(j1) = buffer(j1) + bM2*sca
-    j1 = modulo(j0-2,N(dir))+1  ! j0-1
+    j1 = modulo(j0-2,mesh_sc%N(dir))+1  ! j0-1
     buffer(j1) = buffer(j1) + bM*sca
-    j1 = modulo(j0-1,N(dir))+1  ! j0
+    j1 = modulo(j0-1,mesh_sc%N(dir))+1  ! j0
     buffer(j1) = buffer(j1) + b0*sca
-    j1 = modulo(j0,N(dir))+1    ! j0+1
+    j1 = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
     buffer(j1) = buffer(j1) + bP*sca
-    j1 = modulo(j0+1,N(dir))+1  ! j0+2
+    j1 = modulo(j0+1,mesh_sc%N(dir))+1  ! j0+2
     buffer(j1) = buffer(j1) + bP2*sca
 
 end subroutine AC_remesh_O4_center_array
@@ -1536,7 +1532,7 @@ subroutine AC_remesh_O4_center_pter(pos_adim, sca, buffer)
     real(WP)    :: y0                       ! adimensionned distance to mesh points
     ! Mesh point used in remeshing formula
     j0 = nint(pos_adim)
 
     ! Distance to mesh points
     y0 = (pos_adim - dble(j0))
 
@@ -1559,7 +1555,7 @@ subroutine AC_remesh_O4_center_pter(pos_adim, sca, buffer)
     buffer(j0+1)%pter = buffer(j0+1)%pter   + bP*sca
     buffer(j0+2)%pter = buffer(j0+2)%pter   + bP2*sca
 
 
 end subroutine AC_remesh_O4_center_pter
 
 
@@ -1653,25 +1649,25 @@ subroutine AC_remesh_O4_tag_CL_array(dir, posM_ad, scaM, pos_adim, sca, posP_ad,
 
     ! -- remeshing -- 
     ! j0-3
-    j1 = modulo(j0-4,N(dir))+1
+    j1 = modulo(j0-4,mesh_sc%N(dir))+1
     buffer(j1) = buffer(j1) + aM3*scaM
     ! j0-2
-    j1 = modulo(j0-3,N(dir))+1  
+    j1 = modulo(j0-3,mesh_sc%N(dir))+1
     buffer(j1) = buffer(j1) + aM2*scaM + bM2*sca
     ! j0-1
-    j1 = modulo(j0-2,N(dir))+1
+    j1 = modulo(j0-2,mesh_sc%N(dir))+1
     buffer(j1) = buffer(j1) + aM*scaM  + bM*sca   + cM*scaP
     ! j0
-    j1 = modulo(j0-1,N(dir))+1
+    j1 = modulo(j0-1,mesh_sc%N(dir))+1
     buffer(j1) = buffer(j1) + a0*scaM  + b0*sca   + c0*scaP  + e0*scaP2
     ! j0+1
-    j1 = modulo(j0,N(dir))+1    ! j0+1
+    j1 = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
     buffer(j1) = buffer(j1)            + bP*sca   + cP*scaP  + eP*scaP2
     ! j0+2
-    j1 = modulo(j0+1,N(dir))+1  ! j0+2
+    j1 = modulo(j0+1,mesh_sc%N(dir))+1  ! j0+2
     buffer(j1) = buffer(j1)                       + cP2*scaP + ep2*scaP2
     ! j0+3
-    j1 = modulo(j0+2,N(dir))+1  ! j0+3
+    j1 = modulo(j0+2,mesh_sc%N(dir))+1  ! j0+3
     buffer(j1) = buffer(j1)                                  + ep3*scaP2
 
 end subroutine AC_remesh_O4_tag_CL_array
@@ -1892,31 +1888,31 @@ subroutine AC_remesh_O4_tag_LC_array(dir, posM_ad, scaM, pos_adim, sca, posP_ad,
 
     ! remeshing
     ! j0-3
-    j1 = modulo(j0-4,N(dir))+1
+    j1 = modulo(j0-4,mesh_sc%N(dir))+1
     buffer(j1) = buffer(j1) + aM3*scaM
     ! j0-2
-    j1 = modulo(j0-3,N(dir))+1  
+    j1 = modulo(j0-3,mesh_sc%N(dir))+1
     buffer(j1) = buffer(j1) + aM2*scaM + bM2*sca
     ! j0-1
-    j1 = modulo(j0-2,N(dir))+1
+    j1 = modulo(j0-2,mesh_sc%N(dir))+1
     buffer(j1) = buffer(j1) + aM*scaM  + bM*sca   + cM*scaP
     ! j0
-    j1 = modulo(j0-1,N(dir))+1
+    j1 = modulo(j0-1,mesh_sc%N(dir))+1
     buffer(j1) = buffer(j1) + a0*scaM  + b0*sca   + c0*scaP  + e0*scaP2
     ! j0+1
-    j1 = modulo(j0,N(dir))+1    ! j0+1
+    j1 = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
     buffer(j1) = buffer(j1) + aP*scaM  + bP*sca   + cP*scaP  + eP*scaP2
     ! j0+2
-    j1 = modulo(j0+1,N(dir))+1  ! j0+2
+    j1 = modulo(j0+1,mesh_sc%N(dir))+1  ! j0+2
     buffer(j1) = buffer(j1)  + aP2*scaM + bP2*sca + cP2*scaP + ep2*scaP2
     ! j0+3
-    j1 = modulo(j0+2,N(dir))+1  ! j0+3
+    j1 = modulo(j0+2,mesh_sc%N(dir))+1  ! j0+3
     buffer(j1) = buffer(j1)             + bP3*sca + cP3*scaP + ep3*scaP2
     ! j0+3
-    j1 = modulo(j0+3,N(dir))+1  ! j0+3
+    j1 = modulo(j0+3,mesh_sc%N(dir))+1  ! j0+3
     buffer(j1) = buffer(j1)                       + cP4*scaP + ep4*scaP2
     ! j0+3
-    j1 = modulo(j0+4,N(dir))+1  ! j0+5
+    j1 = modulo(j0+4,mesh_sc%N(dir))+1  ! j0+5
     buffer(j1) = buffer(j1)                                  + ep5*scaP2
 
 end subroutine AC_remesh_O4_tag_LC_array
diff --git a/HySoP/src/scalesInterface/particles/advec_type.f90 b/HySoP/src/scalesInterface/particles/advec_type.f90
index 6d3a8784b..6a799123b 100644
--- a/HySoP/src/scalesInterface/particles/advec_type.f90
+++ b/HySoP/src/scalesInterface/particles/advec_type.f90
@@ -1,3 +1,4 @@
+!USEFORTEST advec
 !> @addtogroup part
 !! @{
 !------------------------------------------------------------------------------
@@ -25,6 +26,26 @@ module advec_abstract_proc
     implicit none
 
 
+    ! --- Abstract profile of subroutine used to initialise particle velocity ---
+    ! Such a procedure fills the particle velocity array p_V from the velocity
+    ! component V_comp, for the group of lines identified by the indices (j,k)
+    ! and the group size Gsize.
+    abstract interface
+      subroutine AC_init_p_V(V_comp, j, k, Gsize, p_V)
+
+        use precision_tools
+        implicit none
+
+        ! Input/Output
+        integer, intent(in)                       :: j,k
+        integer, dimension(2), intent(in)         :: Gsize
+        real(WP), dimension(:,:,:),intent(out)    :: p_V
+        real(WP), dimension(:,:,:), intent(in)    :: V_comp
+
+      end subroutine AC_init_p_V
+    end interface
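+
+    ! Usage sketch (illustration only, not part of the module code): a solver
+    ! can select a velocity-initialisation routine at run time through a
+    ! procedure pointer conforming to this interface, e.g.
+    !    procedure(AC_init_p_V), pointer :: init_p_V => null()
+    !    init_p_V => my_init_p_V    ! hypothetical routine matching AC_init_p_V
+    !    call init_p_V(V_comp, j, k, Gsize, p_V)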
+
     ! --- Abstract profile of subroutine used as wrapper for remeshing ---
     ! Such a procedure will call all the needed other subroutine to
     ! remesh in a buffer (procedure itself use a AC_remesh_line_pter subroutine)
@@ -88,15 +109,15 @@ module advec_abstract_proc
             integer, dimension(2), intent(in)           :: gs
             integer, intent(in)                         :: j, k
             integer, intent(in)                         :: ind_min
-            real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim     ! adimensionned particles position
-            logical, dimension(:,:,:), intent(in)       :: bl_type        ! is the particle block a center block or a left one ?
-            logical, dimension(:,:,:), intent(in)       :: bl_tag         ! indice of tagged particles
-            integer, dimension(:,:), intent(in)         :: send_min       ! distance between me and processus wich send me information
-            integer, dimension(:,:), intent(in)         :: send_max       ! distance between me and processus wich send me information
-            real(WP), dimension(:,:,:), intent(inout)   :: scalar         ! the initial scalar field transported by particles
-            real(WP),dimension(:), intent(out), target  :: buffer         ! buffer where particles are remeshed
-            integer, dimension(:), intent(inout)        :: pos_in_buffer  ! describe how the one dimensionnal array "buffer" are split
-                                                                          ! in part corresponding to different processes
+            real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! dimensionless particle positions
+            logical, dimension(:,:,:), intent(in)       :: bl_type      ! is the particle block a centered one or a left one?
+            logical, dimension(:,:,:), intent(in)       :: bl_tag       ! indices of tagged particles
+            integer, dimension(:,:), intent(in)         :: send_min     ! distance between the current process and the one which sends it information
+            integer, dimension(:,:), intent(in)         :: send_max     ! distance between the current process and the one which sends it information
+            real(WP), dimension(:,:,:), intent(inout)   :: scalar       ! the initial scalar field transported by particles
+            real(WP),dimension(:), intent(out), target  :: buffer       ! buffer where particles are remeshed
+            integer, dimension(:), intent(inout)        :: pos_in_buffer ! describes how the one-dimensional array "buffer" is split
+                                                                         ! into parts corresponding to different processes
 
         end subroutine remesh_in_buffer_type
     end interface
@@ -169,7 +190,7 @@ module advec_abstract_proc
             integer, dimension(:,:), intent(in)         :: cartography
             real(WP),dimension(:), intent(in)           :: buffer       ! buffer containing the data to redistribute into the local scalar field.
             real(WP), dimension(:,:,:), intent(inout)   :: scalar       ! the scalar field.
-            integer, intent(out)                        :: beg_buffer   ! first indice inside where the scalar values are stored into the buffer
+            integer, intent(inout)                      :: beg_buffer   ! first index in the buffer where the scalar values are stored
                                                                         ! for the current sending process, i.e. where to start reading data from the buffer.
         end subroutine remesh_buffer_to_scalar
     end interface
diff --git a/HySoP/src/scalesInterface/particles/advec_variables.f90 b/HySoP/src/scalesInterface/particles/advec_variables.f90
index 3f99c99ce..6e0f30223 100644
--- a/HySoP/src/scalesInterface/particles/advec_variables.f90
+++ b/HySoP/src/scalesInterface/particles/advec_variables.f90
@@ -1,3 +1,5 @@
+!USEFORTEST advec
+!USEFORTEST interpolation
 !> @addtogroup part
 !! @{
 !------------------------------------------------------------------------------
@@ -36,16 +38,15 @@ module advec_variables
     integer, public                             :: send_j_min
     !> maximal indice of the send buffer
     integer, public                             :: send_j_max
-    !> minimal indice used in remeshing of each line
-    integer,public,dimension(:,:),allocatable   :: send_group_min
-    !> maximal indice used in remeshing of each line
-    integer,public,dimension(:,:),allocatable   :: send_group_max
+    !> To take diffusion into account during remeshing
+    real(WP), protected, dimension(:,:), allocatable :: sc_diff_dt_dx
+
 
     ! ------ Solver context -----
     ! solver choosen
     character(len=str_short), protected         :: type_solv
     integer, dimension(2), protected            :: remesh_stencil
-    ! ------ Block infromation -----
+    ! ------ Remeshing information -----
     !> number of particles in a block
     integer, protected                          :: bl_size
     !> distance between the "central" mesh point and the extream mesh point of the stencil of points used to remesh a particle
@@ -55,6 +56,8 @@ module advec_variables
     integer, dimension(2), protected            :: bl_remesh_superposition
     !> Number of block on each processus along each direction
     integer, dimension(3), protected            :: bl_nb
+    !> Maximum CFL number allowed by communications for the current parameters
+    integer, protected                          :: CFL_max
 
     ! ------ To ensure unique mpi message tag -----
     ! Tag generate with a proc_gap
@@ -108,9 +111,8 @@ subroutine AC_solver_init(part_solv, verbosity)
     verbose = .true.
     if (present(verbosity)) verbose = verbosity
 
-    if (present(part_solv)) type_solv = part_solv
-
     ! Initialisation part adapted to each method
+    if (present(part_solv)) type_solv = part_solv
     select case(type_solv)
         case('p_O2')
             bl_size = 2
@@ -131,23 +133,33 @@ subroutine AC_solver_init(part_solv, verbosity)
                 write(*,'(6x,a)') '====================================='
             end if
         case('p_M4')
-            bl_size = 2
+            bl_size = 1 ! 2
             bl_bound_size = 2   ! Be aware : don't use it to compute superposition between
                                 ! mpi processes (not as predictible as corrected scheme)
             remesh_stencil = (/1,2/)
             if ((cart_rank==0).and.(verbose)) then
                 write(*,'(6x,a)') '========== Advection scheme ========='
-                write(*,'(6x,a)') ' particle method, corrected M prime 4'
+                write(*,'(6x,a)') ' particle method,           M prime 4'
                 write(*,'(6x,a)') '====================================='
             end if
         case('p_M6')
-            bl_size = 2
+            bl_size = 1 ! 2
+            bl_bound_size = 3   ! Be aware : don't use it to compute superposition between
+                                ! mpi processes (not as predictible as corrected scheme)
+            remesh_stencil = (/2,3/)
+            if ((cart_rank==0).and.(verbose)) then
+                write(*,'(6x,a)') '========== Advection scheme ========='
+                write(*,'(6x,a)') ' particle method,           M prime 6'
+                write(*,'(6x,a)') '====================================='
+            end if
+        case('p_L4')
+            bl_size = 1 ! 2
             bl_bound_size = 3   ! Be aware : don't use it to compute superposition between
                                 ! mpi processes (not as predictible as corrected scheme)
             remesh_stencil = (/2,3/)
             if ((cart_rank==0).and.(verbose)) then
                 write(*,'(6x,a)') '========== Advection scheme ========='
-                write(*,'(6x,a)') ' particle method, corrected M prime 6'
+                write(*,'(6x,a)') '     particle method, Lambda 4,4     '
                 write(*,'(6x,a)') '====================================='
             end if
         case('p_M8')
@@ -157,9 +169,19 @@ subroutine AC_solver_init(part_solv, verbosity)
             remesh_stencil = (/3,4/)
             if ((cart_rank==0).and.(verbose)) then
                 write(*,'(6x,a)') '========== Advection scheme ========='
-                write(*,'(6x,a)') ' particle method, corrected M prime 8'
+                write(*,'(6x,a)') ' particle method,           M prime 8'
                 write(*,'(6x,a)') '====================================='
             end if
+        case('d_M4')
+            bl_size = 1 ! 2
+            bl_bound_size = 2   ! Be aware : don't use it to compute superposition between
+                                ! mpi processes (not as predictible as corrected scheme)
+            remesh_stencil = (/1,2/)
+            if ((cart_rank==0).and.(verbose)) then
+                write(*,'(6x,a)') '============= Advection scheme ==========='
+                write(*,'(6x,a)') ' particle method, M prime 4 with diffusion'
+                write(*,'(6x,a)') '=========================================='
+            end if
         case default
             bl_size = 2
             bl_bound_size = 1
@@ -172,13 +194,21 @@ subroutine AC_solver_init(part_solv, verbosity)
     end select
 
     ! Check that the subdomain contains a number of mesh points divisible by bl_size
-    if ((modulo(N_proc(1),bl_size)/=0).OR.(modulo(N_proc(2),bl_size)/=0).OR.(modulo(N_proc(3),bl_size)/=0)) then
+    if ((modulo(mesh_sc%N_proc(1),bl_size)/=0).OR.  &
+      & (modulo(mesh_sc%N_proc(2),bl_size)/=0).OR.  &
+      & (modulo(mesh_sc%N_proc(3),bl_size)/=0)) then
         if (cart_rank ==0) print*, 'Number of mesh points per process must be a multiple of ', bl_size
         stop
     end if
 
     ! Compute local number of block along each direction
-    bl_nb = N_proc/bl_size
+    bl_nb = mesh_sc%N_proc/bl_size
+
+    ! Compute maximal CFL number
+    CFL_max = minval(mesh_sc%N_proc)*(size(neighbors)/2)
+
+    ! To account for diffusion during the remeshing operation
+    if(.not. allocated(sc_diff_dt_dx)) allocate(sc_diff_dt_dx(1,3))
 
 end subroutine AC_solver_init
 
@@ -194,5 +224,34 @@ subroutine AC_set_part_bound_size(bound_size)
 
 end subroutine AC_set_part_bound_size
 
+!> Manually set the diffusion parameter used to take diffusion into account
+!! directly during remeshing.
+subroutine AC_set_diff_dt_dx(sc_diff)
+
+  use cart_topology
+
+  ! Input/Output
+  real(WP), dimension(:), intent(in)  ::  sc_diff
+  ! Local
+  integer                             :: ind
+! character(len=10)                 :: format_out
+
+  if (size(sc_diff_dt_dx,1) /= size(sc_diff)) then
+    deallocate(sc_diff_dt_dx)
+    allocate(sc_diff_dt_dx(size(sc_diff),3))
+  end if
+  do ind =1, 3
+    sc_diff_dt_dx(:,ind) = sc_diff/(mesh_sc%dx(ind)**2)
+  end do
+
+! if(cart_rank==0) then
+!   write(format_out,'(a,i0,a)') '(a,', size(sc_diff_dt_dx,1), 'g15.8)'
+!   write(*,format_out) 'diff along X = ', sc_diff_dt_dx(:,1)
+!   write(*,format_out) 'diff along Y = ', sc_diff_dt_dx(:,2)
+!   write(*,format_out) 'diff along Z = ', sc_diff_dt_dx(:,3)
+! end if
+
+end subroutine AC_set_diff_dt_dx
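+
+! Usage sketch (illustration only): for a single scalar with diffusivity nu,
+!    call AC_set_diff_dt_dx((/ nu /))
+! fills sc_diff_dt_dx(1,ind) with nu/mesh_sc%dx(ind)**2 for each direction ind;
+! the remeshing kernels can then scale these values by the time step (hence
+! the name "sc_diff_dt_dx").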
+
 
 end module advec_variables
diff --git a/HySoP/src/scalesInterface/particles/interpolation_velo.F90 b/HySoP/src/scalesInterface/particles/interpolation_velo.F90
new file mode 100644
index 000000000..41c2e6222
--- /dev/null
+++ b/HySoP/src/scalesInterface/particles/interpolation_velo.F90
@@ -0,0 +1,896 @@
+!USEFORTEST interpolation
+!USEFORTEST advec
+!> @addtogroup part
+!! @{
+!------------------------------------------------------------------------------
+!
+! MODULE: Interpolation_velo
+!
+!
+! DESCRIPTION:
+!> The module ``Interpolation_velo'' gathers the functions and subroutines used
+!! to interpolate velocity at particle positions which are not specific to a direction.
+!! @details
+!! This module gathers functions and routines used to advect a scalar which are not
+!! specific to a direction. This is a parallel implementation using MPI and
+!! the Cartesian topology it provides. It also contains the variables common to
+!! the solver along each direction and other generic variables used for the
+!! advection based on the particle method.
+!!
+!! Except for testing purposes, this module is not supposed to be used by the
+!! main code but only by the other advection modules. More precisely, a final user
+!! must only use the generic "advec" module, which contains all the interfaces to
+!! solve the advection equation with the particle method and to choose the
+!! remeshing formula, the dimensional splitting and everything else. Except for
+!! testing purposes, the other advection modules only have to include
+!! "advec_common".
+!!
+!! The module "test_advec" can be used in order to validate the procedures
+!! embedded in this module.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module Interpolation_velo
+
+    use precision_tools
+    use cart_topology
+
+    implicit none
+
+    public
+
+
+    ! ===== Public procedures =====
+    !----- To interpolate velocity -----
+
+    ! ===== Public variables =====
+    procedure(weight_M4), pointer,  public :: get_weight => null()
+
+    ! ===== Private procedures =====
+    private :: weight_M4, weight_Mprime4, weight_Lambda4_4
+
+    ! ===== Private variables =====
+    character(len=4), protected :: interpol = 'Mp4'
+    integer, protected :: stencil_size = 4  ! total number of points in the interpolation stencil
+    integer, protected :: stencil_g = 1     ! number of ghost points needed on the left ("gauche") of a subdomain
+    integer, protected :: stencil_d = 2     ! number of ghost points needed on the right ("droite") of a subdomain
+
+
+contains
+
+! ===== Public procedure =====
+
+! ============================================================
+! ====================     Initialisation ====================
+! ============================================================
+!> To choose interpolation formula
+subroutine interpol_init(formula, verbose)
+
+    character(len=*), optional, intent(in)  ::  formula
+    logical, optional, intent(in)           ::  verbose
+
+    logical :: verbosity
+
+    if(present(formula)) then
+      interpol = formula
+    else
+      interpol = 'Mp4'
+    end if
+
+    verbosity = .false.
+    if(present(verbose)) verbosity = verbose
+
+    select case(trim(interpol))
+    case('L4_4')
+      stencil_size = 6
+      stencil_d = 3
+      stencil_g = 2
+      get_weight => weight_Lambda4_4
+      if ((cart_rank==0).and.(verbosity)) &
+        & write(*,'(6x,a)') '============= Interpolation = Lambda 4,4 ==========='
+    case('M4')
+      stencil_size = 4
+      stencil_d = 2
+      stencil_g = 1
+      get_weight => weight_M4
+      if ((cart_rank==0).and.(verbosity)) &
+        & write(*,'(6x,a)') '============= Interpolation = M 4 ==========='
+    case default
+      stencil_size = 4
+      stencil_d = 2
+      stencil_g = 1
+      get_weight => weight_Mprime4
+      if ((cart_rank==0).and.(verbosity)) &
+        & write(*,'(6x,a)') '============= Interpolation = Mprime 4 ==========='
+    end select
+
+end subroutine interpol_init
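+
+! Usage sketch (illustration only): once a formula has been selected, the
+! get_weight pointer returns the stencil_size interpolation weights associated
+! with a normalised offset pos in [0,1), e.g.
+!    call interpol_init('M4', verbose=.true.)
+!    weights = get_weight(0.25_WP)   ! here size(weights) = stencil_size = 4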
+
+! ==========================================================================================
+! ====================     Interpolation of each velocity component     ====================
+! ==========================================================================================
+! Except for testing purposes, only these bricks must be used.
+
+! For advection solver
+subroutine Interpol_2D_3D_vect(dx_f, dx_c, Vx, Vy, Vz, Vx_c, Vx_f, Vy_c, Vy_f, Vz_c, Vz_f)
+
+  real(WP), dimension(3), intent(in)        :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: Vx, Vy, Vz
+  real(WP), dimension(:,:,:),intent(inout)  :: Vx_c, Vy_c, Vz_c
+  real(WP), dimension(:,:,:),intent(inout)  :: Vx_f, Vy_f, Vz_f
+
+  call Interpol_2D_vect(dx_f, dx_c, Vx, Vy, Vz, Vx_c, Vy_c, Vz_c)
+
+  call Inter_FirstDir_no_com(Vx_c, dx_c(1), Vx_f, dx_f(1))
+
+  call Inter_FirstDir_com(2, Vy_c, dx_c(2), Vy_f, dx_f(2))
+  call Inter_FirstDir_com(3, Vz_c, dx_c(3), Vz_f, dx_f(3))
+
+end subroutine Interpol_2D_3D_vect
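+
+! Note on the call sequence above (illustration): after Interpol_2D_vect, Vx_c
+! keeps the (X,Y,Z) layout, so its remaining X interpolation uses the local
+! periodic variant, whereas Vy_c and Vz_c come back permuted, stored along
+! (Y,X,Z) and (Z,X,Y) respectively; interpolating along their first array
+! dimension crosses subdomain boundaries, hence the communicating variant with
+! dir = 2 (resp. dir = 3).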
+
+
+
+!> Interpolate each component of a vector along its transverse directions.
+!!    @param[in]        dx_c        = space step on the coarse grid (for last direction)
+!!    @param[in]        dx_f        = space step on the fine grid (for last direction)
+!!    @param[in]        Vx          = vector component along X
+!!    @param[in]        Vy          = vector component along Y
+!!    @param[in]        Vz          = vector component along Z
+!!    @param[out]       InterX      = interpolation of Vx along Y and Z
+!!    @param[out]       InterY      = interpolation of Vy along X and Z
+!!    @param[out]       InterZ      = interpolation of Vz along X and Y
+subroutine Interpol_2D_vect(dx_f, dx_c, Vx, Vy, Vz, InterX, InterY, InterZ)
+
+  real(WP), dimension(3), intent(in)        :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: Vx, Vy, Vz
+  real(WP), dimension(:,:,:),intent(inout)  :: InterX, InterY, InterZ
+  ! Local variable
+  real(WP), dimension(2)                    :: d_f, d_c
+
+  ! For Vx, interpolation along Y and Z
+  call Inter_YZ(Vx, dx_c(2:3), InterX, dx_f(2:3))
+  ! For Vy, interpolation along Z (with communications) then along X (no communication required)
+  d_c = (/dx_c(1), dx_c(3)/)
+  d_f = (/dx_f(1), dx_f(3)/)
+  call Inter_XZ_permut(Vy, d_c, InterY, d_f)
+  ! For Vz, interpolation along Y (with communications) then along X (no communication required)
+  call Inter_XY_permut(Vz, dx_c(1:2), InterZ, dx_f(1:2))
+
+end subroutine Interpol_2D_vect
+
+!> 3D interpolation of a field to a finer grid - no transpositions.
+!!    @param[in]        V_coarse    = velocity to interpolate along the last direction
+!!    @param[in]        dx_c        = space step on the coarse grid (for last direction)
+!!    @param[in,out]    V_fine      = interpolated velocity
+!!    @param[in]        dx_f        = space step on the fine grid (for last direction)
+subroutine Interpol_3D(V_coarse, dx_c, V_fine, dx_f)
+
+  real(WP), dimension(3), intent(in)        :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: V_coarse
+  real(WP), dimension(:,:,:),intent(inout)  :: V_fine
+  ! Local variable
+  real(WP), dimension(size(V_fine,1),size(V_coarse,2),size(V_coarse,3))    :: V_middle ! to save result of interpolation along X
+
+  ! Interpolate along X
+  call Inter_FirstDir_no_com(V_coarse, dx_c(1), V_middle, dx_f(1))
+
+  ! And then along Y and Z
+  call Inter_YZ(V_middle, dx_c(2:3), V_fine, dx_f(2:3))
+
+end subroutine Interpol_3D
+
+! ========================================================================
+! ====================        2D interpolation        ====================
+! ========================================================================
+
+!> Interpolate a field (ordered along X,Y,Z) along the X- and Y-axes
+!!    @param[in]        V_coarse    = velocity to interpolate along the X and Y directions
+!!    @param[in]        dx_c        = space step on the coarse grid (along X and Y)
+!!    @param[in,out]    V_fine      = interpolated velocity
+!!    @param[in]        dx_f        = space step on the fine grid (along X and Y)
+!! @details
+!!   V_fine and V_coarse must have the same resolution along the third
+!! direction.
+subroutine Inter_XY(V_coarse, dx_c, V_fine, dx_f)
+
+  use mpi
+
+  ! Input/Output
+  real(WP), dimension(2), intent(in)        :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: V_coarse
+  real(WP), dimension(:,:,:),intent(inout)  :: V_fine
+  ! Local
+  real(WP), dimension(size(V_coarse,1),size(V_coarse,3),size(V_coarse,2))  :: V_permut ! permutation required for first interpolation
+  real(WP), dimension(size(V_coarse,1),size(V_fine,2),size(V_coarse,3))    :: V_middle ! to save result of interpolation along Y + permutation
+  integer :: ind  ! loop indice
+
+  ! Check field sizes
+  if(.not.(size(V_fine,3)==size(V_coarse,3))) then
+    write(*,'(a)') '[ERROR] Interpolation along XY : V_coarse and V_fine do not have the same resolution along the Z axis'
+    stop
+  end if
+
+  ! Permutation to prepare first interpolation
+  do ind = 1, size(V_coarse,3)
+    V_permut(:,ind,:) = V_coarse(:,:,ind)
+  end do
+
+  ! Interpolation along last direction = Y-direction + permutation to re-order indices
+  call Inter_LastDir_Permut_com(2, V_permut, dx_c(2), V_middle, dx_f(2))
+
+  ! Interpolation along X = first direction
+  call Inter_FirstDir_no_com(V_middle, dx_c(1), V_fine, dx_f(1))
+
+end subroutine Inter_XY
+
+
+!> Interpolate a field (ordered along X,Y,Z) along the X- and Y-axes, with a permutation
+!! in order to get a field stored along (Z,X,Y)
+!!    @param[in]        V_coarse    = velocity to interpolate along the X and Y directions
+!!    @param[in]        dx_c        = space step on the coarse grid (along X and Y)
+!!    @param[in,out]    V_fine      = interpolated velocity
+!!    @param[in]        dx_f        = space step on the fine grid (along X and Y)
+!! @details
+!!   V_fine and V_coarse must have the same resolution along the third
+!! direction.
+subroutine Inter_XY_permut(V_coarse, dx_c, V_fine, dx_f)
+
+  use mpi
+
+  ! Input/Output
+  real(WP), dimension(2), intent(in)        :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: V_coarse
+  real(WP), dimension(:,:,:),intent(inout)  :: V_fine
+  ! Local
+  real(WP), dimension(size(V_coarse,1),size(V_coarse,3),size(V_coarse,2))  :: V_permut ! permutation required for first interpolation
+  real(WP), dimension(size(V_coarse,1),size(V_coarse,3),size(V_fine,3))    :: V_middle ! to save result of interpolation along Y
+  integer :: ind  ! loop indice
+
+  ! Check field sizes
+  if(.not.(size(V_fine,1)==size(V_coarse,3))) then
+    write(*,'(a)') '[ERROR] Interpolation along XY permut : V_fine first dimension does not match V_coarse resolution along the Z axis'
+    stop
+  end if
+
+  ! Permutation to prepare first interpolation
+  do ind = 1, size(V_coarse,3)
+    V_permut(:,ind,:) = V_coarse(:,:,ind)
+  end do
+
+  ! Interpolation along last direction = Y-direction
+  call Inter_LastDir_com(2, V_permut, dx_c(2), V_middle, dx_f(2))
+
+  ! Interpolation along X = first direction  + permutation to re-order indices
+  call Inter_FirstDir_Permut_no_com(V_middle, dx_c(1), V_fine, dx_f(1))
+
+end subroutine Inter_XY_permut
+
+
+!> Interpolate a field (ordered along X,Y,Z) along the Y- and Z-axes
+!!    @param[in]        V_coarse    = velocity to interpolate along Y and Z directions
+!!    @param[in]        dx_c        = space step on the coarse grid (for second and last directions)
+!!    @param[in,out]    V_fine      = interpolated velocity
+!!    @param[in]        dx_f        = space step on the fine grid (for second and last direction)
+!! @details
+!!   V_fine and V_coarse must have the same resolution along the first direction.
+subroutine Inter_YZ(V_coarse, dx_c, V_fine, dx_f)
+
+  use mpi
+
+  ! Input/Output
+  real(WP), dimension(2), intent(in)        :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: V_coarse
+  real(WP), dimension(:,:,:),intent(inout)  :: V_fine
+  ! Local
+  real(WP), dimension(size(V_coarse,1),size(V_fine,3),size(V_coarse,2))    :: V_middle ! to save result of interpolation along Z + permutation
+
+
+  ! Check if arrays have the right size
+  if(.not.(size(V_fine,1)==size(V_coarse,1))) then
+    write(*,'(a)') '[ERROR] Interpolation along YZ : V_coarse and V_fine do not have the same resolution along the first direction'
+    stop
+  end if
+
+  ! Interpolation along Z + permutation between Y and Z
+  call Inter_LastDir_Permut_com(3, V_coarse, dx_c(2), V_middle, dx_f(2))
+
+  ! Interpolation along Y(=third direction thanks to previous permutation) + permutation between Y and Z
+  call Inter_LastDir_Permut_com(2, V_middle, dx_c(1), V_fine, dx_f(1))
+
+end subroutine Inter_YZ
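+
+! Note (illustration): each call above interpolates along the last array
+! dimension and then swaps the last two dimensions, so the layout evolves as
+! (X,Y,Z) -> (X,Z,Y) with Z refined -> (X,Y,Z) with Y and Z refined: the two
+! permutations restore the original storage order.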
+
+
+!> Interpolate a field (ordered along X,Y,Z) along the X- and Z-axes
+!!    @param[in]        V_coarse    = velocity to interpolate along X and Z directions
+!!    @param[in]        dx_c        = space step on the coarse grid (for first and last directions)
+!!    @param[in,out]    V_fine      = interpolated velocity
+!!    @param[in]        dx_f        = space step on the fine grid (for first and last direction)
+!! @details
+!!   V_fine and V_coarse must have the same resolution along the second direction.
+subroutine Inter_XZ(V_coarse, dx_c, V_fine, dx_f)
+
+  use mpi
+
+  ! Input/Output
+  real(WP), dimension(2), intent(in)        :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: V_coarse
+  real(WP), dimension(:,:,:),intent(inout)  :: V_fine
+  ! Local
+  real(WP), dimension(size(V_coarse,1),size(V_coarse,2),size(V_fine,3))    :: V_middle ! to save result of interpolation along Z
+
+
+  ! Check if arrays have the right size
+  if(.not.(size(V_fine,2)==size(V_coarse,2))) then
+    write(*,'(a)') '[ERROR] Interpolation along XZ : V_coarse and V_fine do not have the same resolution along the second direction'
+    stop
+  end if
+
+  ! Interpolation along Z
+  call Inter_LastDir_com(3, V_coarse, dx_c(2), V_middle, dx_f(2))
+
+  ! Interpolation along X
+  call Inter_FirstDir_no_com(V_middle, dx_c(1), V_fine, dx_f(1))
+
+end subroutine Inter_XZ
+
+
+!> Interpolate a field (ordered along X,Y,Z) along the X- and Z-axes and get a
+!! field stored in function of (Y,X,Z)
+!!    @param[in]        V_coarse    = velocity to interpolate along X and Z directions
+!!    @param[in]        dx_c        = space step on the coarse grid (for first and last directions)
+!!    @param[in,out]    V_fine      = interpolated velocity
+!!    @param[in]        dx_f        = space step on the fine grid (for first and last direction)
+!! @details
+!!   V_fine and V_coarse must have the same resolution along the X-axis.
+subroutine Inter_XZ_permut(V_coarse, dx_c, V_fine, dx_f)
+
+  use mpi
+
+  ! Input/Output
+  real(WP), dimension(2), intent(in)        :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: V_coarse
+  real(WP), dimension(:,:,:),intent(inout)  :: V_fine
+  ! Local
+  real(WP), dimension(size(V_coarse,1),size(V_coarse,2),size(V_fine,3))    :: V_middle ! to save result of interpolation along Z
+
+
+  ! Check if arrays have the right size
+  if(.not.(size(V_fine,1)==size(V_coarse,2))) then
+    write(*,'(a)') '[ERROR] Interpolation along XZ_permut : V_fine first dimension does not match V_coarse resolution along the Y direction'
+    stop
+  end if
+
+  ! Interpolation along Z
+  call Inter_LastDir_com(3, V_coarse, dx_c(2), V_middle, dx_f(2))
+
+  ! Interpolation along X
+  call Inter_FirstDir_Permut_no_com(V_middle, dx_c(1), V_fine, dx_f(1))
+
+end subroutine Inter_XZ_permut
+
+! =================================================================================
+! ====================   Elementary brick = 1D interpolation   ====================
+! =================================================================================
+! Do not use these directly, except for testing purposes. If you want to use them,
+! check the input sizes (they are not checked here because they have already been
+! checked in the functions which call them, when needed).
+
+!> Interpolate a field along the last direction - with communication : V_fine(i,j,k) = interpolation(V_coarse(i,j,k_interpolation))
+!!    @param[in]        dir         = last direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        V_coarse    = velocity to interpolate along the last direction
+!!    @param[in]        dx_c        = space step on the coarse grid (for last direction)
+!!    @param[in,out]    V_fine      = interpolated velocity
+!!    @param[in]        dx_f        = space step on the fine grid (for last direction)
+!! @details
+!!   V_fine and V_coarse must have the same resolution along second and third
+!! directions.
+subroutine Inter_LastDir_com(dir, V_coarse, dx_c, V_fine, dx_f)
+
+  use mpi
+
+  ! Input/Output
+  integer, intent(in)                       :: dir
+  real(WP), intent(in)                      :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: V_coarse
+  real(WP), dimension(:,:,:),intent(inout)  :: V_fine
+  ! Local variable
+  real(WP), dimension(:,:,:), allocatable   :: V_beg, V_end ! ghost values of velocity
+  real(WP), dimension(stencil_size)         :: weight       ! interpolation weight
+  integer               :: i,ind,i_bis, V_ind   ! some loop indices
+  integer               :: ind_max, ind_min, ind_limit
+  real(WP)              :: pos
+  integer               :: N_coarse, N_fine     ! number of grid points
+  integer, dimension(2) :: com_size             ! size of mpi communication for ghost points
+  integer, dimension(2) :: rece_request         ! mpi communication request (handle) of nonblocking receive
+  integer, dimension(MPI_STATUS_SIZE)         :: status  ! mpi status (for mpi_wait)
+  integer               :: ierr                 ! mpi error code
+
+  ! Initialisation
+  com_size = size(V_coarse,1)*size(V_coarse,2)
+  com_size(1) = com_size(1)*(stencil_g)
+  com_size(2) = com_size(2)*(stencil_d)
+  N_coarse = size(V_coarse,3)
+  N_fine = size(V_fine,3)
+  ! ind_max = max{index ind on the fine grid such that V_fine(:,:,ind) can be computed without communication}
+  !         = max{ind : V_ind = floor[(ind-1)*dx_f/dx_c]+1 <= N_coarse-stencil_d}
+  !         = max{ind : floor[(ind-1)*dx_f/dx_c] < N_coarse-stencil_d}
+  !         = max{ind : (ind-1)*dx_f < (N_coarse-stencil_d)*dx_c}
+  !         = max{ind : ind < (N_coarse-stencil_d)*dx_c/dx_f + 1}
+  ! The bound below is safe (and at most one point conservative):
+  ind_max = ceiling((N_coarse-stencil_d)*dx_c/dx_f) - 1
+  ind_min = ceiling((stencil_g)*dx_c/dx_f)+1
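+  ! Worked example (illustration only): with the default Mp4 stencil
+  ! (stencil_g = 1, stencil_d = 2), N_coarse = 16 and a refinement factor
+  ! dx_c/dx_f = 2, this gives ind_max = ceiling(14*2)-1 = 27 and
+  ! ind_min = ceiling(1*2)+1 = 3: fine points 3..27 only need local coarse
+  ! data, point 1 copies the coarse value directly, point 2 uses V_beg and
+  ! points 28..N_fine use V_end received from the neighbouring processes.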
+
+  ! ==== Communication ====
+  if(stencil_g>0) then
+    allocate(V_beg(size(V_coarse,1),size(V_coarse,2),stencil_g))
+    ! Initiate non blocking receive
+    call Mpi_Irecv(V_beg(1,1,1),com_size(1),MPI_DOUBLE_PRECISION, &
+      & neighbors(dir,-1), 1, D_comm(dir), rece_request(1), ierr)
+    ! Send data
+    call Mpi_Send(V_coarse(:,:,N_coarse-stencil_g+1),com_size(1),MPI_DOUBLE_PRECISION, &
+      & neighbors(dir,1), 1, D_comm(dir), ierr)
+  end if
+
+  if(stencil_d>0) then
+    allocate(V_end(size(V_coarse,1),size(V_coarse,2),stencil_d))
+    ! Initiate non blocking receive
+    call Mpi_Irecv(V_end(1,1,1),com_size(2),MPI_DOUBLE_PRECISION, &
+      & neighbors(dir,1), 2, D_comm(dir), rece_request(2), ierr)
+    ! Send data
+    call Mpi_Send(V_coarse(:,:,1),com_size(2),MPI_DOUBLE_PRECISION, &
+      & neighbors(dir,-1), 2, D_comm(dir), ierr)
+  end if
+
+  ! ==== Interpolation ====
+  ! -- For middle points --
+  do ind = ind_min, ind_max
+    pos = (ind-1)*dx_f/dx_c
+    V_ind = floor(pos)+1
+    weight = get_weight(pos-V_ind+1)
+    V_ind = V_ind - stencil_g
+    V_fine(:,:,ind) = weight(1)*V_coarse(:,:,V_ind)
+    do i = 1, (stencil_size - 1)
+      V_fine(:,:,ind) = V_fine(:,:,ind) + weight(i+1)*V_coarse(:,:,V_ind+i)
+    end do
+  end do
+  ! -- For the beginning of the line --
+  if(stencil_g>0) call mpi_wait(rece_request(1), status, ierr)
+  ! Use the fact that the interpolation formulas are exact at grid points
+  V_fine(:,:,1) = V_coarse(:,:,1)
+  ! For other first points
+  do ind = 2, ind_min-1
+    pos = (ind-1)*dx_f/dx_c
+    V_ind = floor(pos)+1
+    weight = get_weight(pos-V_ind+1)
+    V_fine(:,:,ind) = weight(1)*V_beg(:,:,V_ind) ! Array starts from 1
+    ind_limit = stencil_g - V_ind + 1
+    do i = 2, ind_limit
+      V_fine(:,:,ind) = V_fine(:,:,ind) + weight(i)*V_beg(:,:,V_ind-1+i) ! first point in V_beg stands for the 1-stencil_g position
+    end do
+    ! We look for first local value of V_coarse at position (:,:,1) ! (array starts at 1)
+    do i_bis = ind_limit+1, stencil_size
+      V_fine(:,:,ind) = V_fine(:,:,ind) + weight(i_bis)*V_coarse(:,:,i_bis-ind_limit)
+    end do
+  end do
+  ! -- For points at the end of a line along the current direction --
+  if(stencil_d>0) call mpi_wait(rece_request(2), status, ierr)
+  do ind = ind_max+1, N_fine
+    pos = (ind-1)*dx_f/dx_c
+    V_ind = floor(pos)+1
+    weight = get_weight(pos-V_ind+1)
+    V_ind = V_ind - stencil_g - 1
+    V_fine(:,:,ind) = weight(1)*V_coarse(:,:,V_ind+1)
+    ind_limit = min((stencil_size),N_coarse-V_ind)
+    do i = 2, ind_limit
+      V_fine(:,:,ind) = V_fine(:,:,ind) + weight(i)*V_coarse(:,:,V_ind+i)
+    end do
+    V_ind = V_ind - N_coarse
+    do i_bis = ind_limit+1, stencil_size
+      V_fine(:,:,ind) = V_fine(:,:,ind) + weight(i_bis)*V_end(:,:,i_bis+V_ind)
+    end do
+  end do
+
+  ! Free memory
+  if(stencil_d>0) deallocate(V_end)
+  if(stencil_g>0) deallocate(V_beg)
+
+end subroutine Inter_LastDir_com
+
+
+!> Interpolate a field along the last direction and permute the second and third directions - with communication : V_fine(i,j,k) = interpolation(V_coarse(i,j,k_interpolation))
+!!    @param[in]        dir         = last direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        V_coarse    = velocity to interpolate along the last direction
+!!    @param[in]        dx_c        = space step on the coarse grid (for last direction)
+!!    @param[in,out]    V_fine      = interpolated velocity
+!!    @param[in]        dx_f        = space step on the fine grid (for last direction)
+!! @details
+!!   V_fine and V_coarse must have the same resolution along the second and third
+!! directions.
+subroutine Inter_LastDir_Permut_com(dir, V_coarse, dx_c, V_fine, dx_f)
+
+  use mpi
+
+  ! Input/Output
+  integer, intent(in)                       :: dir
+  real(WP), intent(in)                      :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: V_coarse
+  real(WP), dimension(:,:,:),intent(inout)  :: V_fine
+  ! Local variable
+  real(WP), dimension(:,:,:), allocatable   :: V_beg, V_end ! ghost values of velocity
+  real(WP), dimension(stencil_size)         :: weight       ! interpolation weight
+  integer               :: i,ind,i_bis, V_ind   ! some loop indices
+  integer               :: ind_max, ind_min, ind_limit
+  real(WP)              :: pos
+  integer               :: N_coarse, N_fine     ! number of grid points
+  integer, dimension(2) :: com_size             ! size of mpi communication for ghost points
+  integer, dimension(2) :: rece_request         ! mpi communication request (handle) of nonblocking receive
+  integer, dimension(MPI_STATUS_SIZE)         :: status  ! mpi status (for mpi_wait)
+  integer               :: ierr                 ! mpi error code
+
+  ! Initialisation
+  com_size = size(V_coarse,1)*size(V_coarse,2)
+  com_size(1) = com_size(1)*(stencil_g)
+  com_size(2) = com_size(2)*(stencil_d)
+  N_coarse = size(V_coarse,3)
+  N_fine = size(V_fine,2)
+  ind_max = ceiling((N_coarse-stencil_d)*dx_c/dx_f) - 1
+  ind_min = ceiling((stencil_g)*dx_c/dx_f)+1
+
+  ! ==== Communication ====
+  if(stencil_g>0) then
+    allocate(V_beg(size(V_coarse,1),size(V_coarse,2),stencil_g))
+    ! Initiate non blocking receive
+    call Mpi_Irecv(V_beg(1,1,1),com_size(1),MPI_DOUBLE_PRECISION, &
+      & neighbors(dir,-1), 1, D_comm(dir), rece_request(1), ierr)
+    ! Send data
+    call Mpi_Send(V_coarse(:,:,N_coarse-stencil_g+1),com_size(1),MPI_DOUBLE_PRECISION, &
+      & neighbors(dir,1), 1, D_comm(dir), ierr)
+  end if
+
+  if(stencil_d>0) then
+    allocate(V_end(size(V_coarse,1),size(V_coarse,2),stencil_d))
+    ! Initiate non blocking receive
+    call Mpi_Irecv(V_end(1,1,1),com_size(2),MPI_DOUBLE_PRECISION, &
+      & neighbors(dir,1), 2, D_comm(dir), rece_request(2), ierr)
+    ! Send data
+    call Mpi_Send(V_coarse(:,:,1),com_size(2),MPI_DOUBLE_PRECISION, &
+      & neighbors(dir,-1), 2, D_comm(dir), ierr)
+  end if
+
+  ! ==== Interpolation ====
+  ! -- For middle points --
+  do ind = ind_min, ind_max
+    pos = (ind-1)*dx_f/dx_c
+    V_ind = floor(pos)+1
+    weight = get_weight(pos-V_ind+1)
+    V_ind = V_ind - stencil_g
+    V_fine(:,ind,:) = weight(1)*V_coarse(:,:,V_ind)
+    do i = 1, (stencil_size - 1)
+      V_fine(:,ind,:) = V_fine(:,ind,:) + weight(i+1)*V_coarse(:,:,V_ind+i)
+    end do
+  end do
+  ! -- For the beginning of the line --
+  if(stencil_g>0) call mpi_wait(rece_request(1), status, ierr)
+  ! Use the fact that the interpolation formulas are exact at grid points
+  V_fine(:,1,:) = V_coarse(:,:,1)
+  ! For other first points
+  do ind = 2, ind_min-1
+    pos = (ind-1)*dx_f/dx_c
+    V_ind = floor(pos)+1
+    weight = get_weight(pos-V_ind+1)
+    V_fine(:,ind,:) = weight(1)*V_beg(:,:,V_ind) ! Array starts from 1
+    ind_limit = stencil_g - V_ind + 1
+    do i = 2, ind_limit
+      V_fine(:,ind,:) = V_fine(:,ind,:) + weight(i)*V_beg(:,:,V_ind-1+i) ! first point in V_beg stands for 1-stencil_g position
+    end do
+    do i_bis = ind_limit+1, stencil_size
+      V_fine(:,ind,:) = V_fine(:,ind,:) + weight(i_bis)*V_coarse(:,:,i_bis-ind_limit)
+    end do
+  end do
+  ! -- For points at the end of a line along the current direction --
+  if(stencil_d>0) call mpi_wait(rece_request(2), status, ierr)
+  do ind = ind_max+1, N_fine
+    pos = (ind-1)*dx_f/dx_c
+    V_ind = floor(pos)+1
+    weight = get_weight(pos-V_ind+1)
+    V_ind = V_ind - stencil_g - 1
+    V_fine(:,ind,:) = weight(1)*V_coarse(:,:,V_ind+1)
+    ind_limit = min((stencil_size),N_coarse-V_ind)
+    do i = 2, ind_limit
+      V_fine(:,ind,:) = V_fine(:,ind,:) + weight(i)*V_coarse(:,:,V_ind+i)
+    end do
+    V_ind = V_ind - N_coarse
+    do i_bis = ind_limit+1, stencil_size
+      V_fine(:,ind,:) = V_fine(:,ind,:) + weight(i_bis)*V_end(:,:,i_bis+V_ind)
+    end do
+  end do
+
+  ! Free memory
+  if(stencil_d>0) deallocate(V_end)
+  if(stencil_g>0) deallocate(V_beg)
+
+end subroutine Inter_LastDir_Permut_com
+
+
+!> Interpolate a field along the first direction - no communication : V_fine(i,j,k) = interpolation(V_coarse(i_interpolation,j,k))
+!!    @param[in]        V_coarse    = velocity to interpolate along the first direction
+!!    @param[in]        dx_c        = space step on the coarse grid (for first direction)
+!!    @param[in,out]    V_fine      = interpolated velocity
+!!    @param[in]        dx_f        = space step on the fine grid (for first direction)
+!! @details
+!!   V_fine and V_coarse must have the same resolution along the second and last
+!! directions.
+subroutine Inter_FirstDir_no_com(V_coarse, dx_c, V_fine, dx_f)
+
+  ! Input/Output
+  real(WP), intent(in)                      :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: V_coarse
+  real(WP), dimension(:,:,:),intent(inout)  :: V_fine
+  ! Local variable
+  real(WP), dimension(stencil_size)         :: weight       ! interpolation weight
+  integer               :: N_coarse, N_fine                 ! number of grid points
+  integer               :: i, ind, V_ind                    ! some loop indices
+  real(WP)              :: pos
+
+  ! ==== Initialisation ====
+  N_coarse = size(V_coarse,1)
+  N_fine = size(V_fine,1)
+
+  ! ==== Interpolation ====
+  ! Use periodicity for boundaries
+  do ind = 1, N_fine
+    pos = (ind-1)*dx_f/dx_c
+    V_ind = floor(pos)+1
+    weight = get_weight(pos-V_ind+1)
+    V_ind = V_ind - stencil_g
+    V_fine(ind,:,:) = weight(1)*V_coarse(modulo(V_ind-1,N_coarse)+1,:,:)
+    do i = 1, (stencil_size - 1)
+      V_fine(ind,:,:) = V_fine(ind,:,:) + weight(i+1)*V_coarse(modulo(V_ind+i-1,N_coarse)+1,:,:)
+    end do
+  end do
+
+end subroutine Inter_FirstDir_no_com
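+
+! Quick check (illustration, assuming the default Mp4 stencil with stencil_g = 1,
+! N_coarse = 8 and dx_c/dx_f = 2): fine point ind = 2 gives pos = 0.5, hence
+! V_ind = floor(pos) + 1 - stencil_g = 0, and the first stencil access
+! modulo(V_ind-1,N_coarse)+1 = modulo(-1,8)+1 = 8 wraps periodically to the
+! last coarse point, as intended.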
+
+
+!> Interpolate a field along the first direction and permute the first and second directions - no communication : V_fine(j,i,k) = interpolation(V_coarse(i_interpolation,j,k))
+!!    @param[in]        V_coarse    = velocity to interpolate along the first direction
+!!    @param[in]        dx_c        = space step on the coarse grid (for first direction)
+!!    @param[in,out]    V_fine      = interpolated velocity
+!!    @param[in]        dx_f        = space step on the fine grid (for first direction)
+!! @details
+!!   V_fine and V_coarse must have the same resolution along the directions
+!! without interpolation.
+subroutine Inter_FirstDir_Permut_no_com(V_coarse, dx_c, V_fine, dx_f)
+
+  ! Input/Output
+  real(WP), intent(in)                      :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: V_coarse
+  real(WP), dimension(:,:,:),intent(inout)  :: V_fine
+  ! Local variable
+  real(WP), dimension(stencil_size)         :: weight       ! interpolation weight
+  integer               :: N_coarse, N_fine                 ! number of grid points
+  integer               :: i, ind, V_ind                    ! some loop indices
+  integer               :: i1, i2                           ! for permutation along the two first direction
+  real(WP)              :: pos, V_current
+
+  ! ==== Initialisation ====
+  N_coarse = size(V_coarse,1)
+  N_fine = size(V_fine,2)
+
+  ! ==== Interpolation ====
+  ! Use periodicity for boundaries
+  do ind = 1, N_fine
+    pos = (ind-1)*dx_f/dx_c
+    V_ind = floor(pos)+1
+    weight = get_weight(pos-V_ind+1)
+    V_ind = V_ind - stencil_g
+    do i2 = 1, size(V_coarse,3)
+      do i1 = 1, size(V_coarse,2)
+        V_current = weight(1)*V_coarse(modulo(V_ind-1,N_coarse)+1,i1,i2)
+        do i = 1, (stencil_size - 1)
+          V_current = V_current + weight(i+1)*V_coarse(modulo(V_ind+i-1,N_coarse)+1,i1,i2)
+        end do
+        V_fine(i1,ind,i2) = V_current
+      end do
+    end do
+  end do
+
+end subroutine Inter_FirstDir_Permut_no_com
+
+
+!> Interpolate a field along the first direction - with communication : V_fine(i,j,k) = interpolation(V_coarse(i_interpolation,j,k))
+!! Variant with communication, where the first array direction can differ from the X-axis.
+!!    @param[in]        dir         = direction of the interpolation (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        V_coarse    = velocity to interpolate along the last direction
+!!    @param[in]        dx_c        = space step on the coarse grid (for last direction)
+!!    @param[in,out]    V_fine      = interpolated velocity
+!!    @param[in]        dx_f        = space step on the fine grid (for last direction)
+!! @details
+!!   V_fine and V_coarse must have the same resolution along second and third
+!! directions.
+subroutine Inter_FirstDir_com(dir, V_coarse, dx_c, V_fine, dx_f)
+
+  use mpi
+
+  ! Input/Output
+  integer, intent(in)                       :: dir
+  real(WP), intent(in)                      :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: V_coarse
+  real(WP), dimension(:,:,:),intent(inout)  :: V_fine
+  ! Local variable
+  real(WP), dimension(:,:,:), allocatable   :: V_beg, V_end ! received ghost values of velocity
+  real(WP), dimension(:,:,:), allocatable   :: V_s1, V_s2   ! ghost values of velocity to send
+  real(WP), dimension(stencil_size)         :: weight       ! interpolation weight
+  integer               :: i,ind,i_bis, V_ind   ! some loop indices
+  integer               :: ind_max, ind_min, ind_limit
+  real(WP)              :: pos
+  integer               :: N_coarse, N_fine     ! number of grid points
+  integer, dimension(2) :: com_size             ! size of mpi communication for ghost points
+  integer, dimension(2) :: rece_request         ! mpi communication request (handle) of nonblocking receive
+  integer, dimension(MPI_STATUS_SIZE)         :: status  ! mpi status (for mpi_wait)
+  integer               :: ierr                 ! mpi error code
+
+  ! Initialisation
+  com_size = size(V_coarse,2)*size(V_coarse,3)
+  com_size(1) = com_size(1)*(stencil_g)
+  com_size(2) = com_size(2)*(stencil_d)
+  N_coarse = size(V_coarse,1)
+  N_fine = size(V_fine,1)
+  ind_max = ceiling((N_coarse-stencil_d)*dx_c/dx_f) - 1
+  ind_min = ceiling((stencil_g)*dx_c/dx_f)+1
+
+  ! ==== Communication ====
+  if(stencil_g>0) then
+    allocate(V_beg(stencil_g,size(V_coarse,2),size(V_coarse,3)))
+    ! Initiate non blocking receive
+    call Mpi_Irecv(V_beg(1,1,1),com_size(1),MPI_DOUBLE_PRECISION, &
+      & neighbors(dir,-1), 1, D_comm(dir), rece_request(1), ierr)
+    ! Send data
+    allocate(V_s1(stencil_g,size(V_coarse,2),size(V_coarse,3)))
+    V_s1 = V_coarse(N_coarse-stencil_g+1:N_coarse,:,:)
+    call Mpi_Send(V_s1(1,1,1),com_size(1),MPI_DOUBLE_PRECISION, &
+      & neighbors(dir,1), 1, D_comm(dir), ierr)
+  end if
+
+  if(stencil_d>0) then
+    allocate(V_end(stencil_d,size(V_coarse,2),size(V_coarse,3)))
+    ! Initiate non blocking receive
+    call Mpi_Irecv(V_end(1,1,1),com_size(2),MPI_DOUBLE_PRECISION, &
+      & neighbors(dir,1), 2, D_comm(dir), rece_request(2), ierr)
+    ! Send data
+    allocate(V_s2(stencil_d,size(V_coarse,2),size(V_coarse,3)))
+    V_s2 = V_coarse(1:stencil_d,:,:)
+    call Mpi_Send(V_s2(1,1,1),com_size(2),MPI_DOUBLE_PRECISION, &
+      & neighbors(dir,-1), 2, D_comm(dir), ierr)
+  end if
+
+  ! ==== Interpolation ====
+  ! -- For middle points --
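+  ! Middle points use only local coarse values; they are treated first so that
+  ! the non-blocking receives of the ghost layers overlap with computation.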
+  do ind = ind_min, ind_max
+    pos = (ind-1)*dx_f/dx_c
+    V_ind = floor(pos)+1
+    weight = get_weight(pos-V_ind+1)
+    V_ind = V_ind - stencil_g
+    V_fine(ind,:,:) = weight(1)*V_coarse(V_ind,:,:)
+    do i = 1, (stencil_size - 1)
+      V_fine(ind,:,:) = V_fine(ind,:,:) + weight(i+1)*V_coarse(V_ind+i,:,:)
+    end do
+  end do
+  ! -- For the beginning of the line --
+  if(stencil_g>0) call mpi_wait(rece_request(1), status, ierr)
+  ! Use the fact that the interpolation formula is exact at coarse grid points
+  V_fine(1,:,:) = V_coarse(1,:,:)
+  ! For the other points near the beginning of the line
+  do ind = 2, ind_min-1
+    pos = (ind-1)*dx_f/dx_c
+    V_ind = floor(pos)+1
+    weight = get_weight(pos-V_ind+1)
+    V_fine(ind,:,:) = weight(1)*V_beg(V_ind,:,:) ! arrays start from 1
+    ind_limit = stencil_g - V_ind + 1
+    do i = 2, ind_limit
+      V_fine(ind,:,:) = V_fine(ind,:,:) + weight(i)*V_beg(V_ind-1+i,:,:) ! the first point of V_beg corresponds to global position 1-stencil_g
+    end do
+    do i_bis = ind_limit+1, stencil_size
+      V_fine(ind,:,:) = V_fine(ind,:,:) + weight(i_bis)*V_coarse(i_bis-ind_limit,:,:)
+    end do
+  end do
+  ! -- For points at the end of the line along the current direction --
+  if(stencil_d>0) call mpi_wait(rece_request(2), status, ierr)
+  do ind = ind_max+1, N_fine
+    pos = (ind-1)*dx_f/dx_c
+    V_ind = floor(pos)+1
+    weight = get_weight(pos-V_ind+1)
+    V_ind = V_ind - stencil_g - 1
+    V_fine(ind,:,:) = weight(1)*V_coarse(V_ind+1,:,:)
+    ind_limit = min((stencil_size),N_coarse-V_ind)
+    do i = 2, ind_limit
+      V_fine(ind,:,:) = V_fine(ind,:,:) + weight(i)*V_coarse(V_ind+i,:,:)
+    end do
+    V_ind = V_ind - N_coarse
+    do i_bis = ind_limit+1, stencil_size
+      V_fine(ind,:,:) = V_fine(ind,:,:) + weight(i_bis)*V_end(i_bis+V_ind,:,:)
+    end do
+  end do
+
+  ! Free memory
+  if(stencil_d>0) deallocate(V_end)
+  if(stencil_d>0) deallocate(V_s2)
+  if(stencil_g>0) deallocate(V_beg)
+  if(stencil_g>0) deallocate(V_s1)
+
+end subroutine Inter_FirstDir_com
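+
+! Usage sketch (illustrative only; variable names are placeholders): assuming
+! the module has been initialised for a 4-point scheme such as M'4 (so that
+! stencil_size = 4) and the MPI topology provides neighbors(:,:) and
+! D_comm(:), a velocity component stored with the X-axis first is refined
+! from a coarse step dx_c to a fine step dx_f by
+!   call Inter_FirstDir_com(1, Vx_coarse, dx_c, Vx_fine, dx_f)
+! For a fine point ind, pos = (ind-1)*dx_f/dx_c is its position on the coarse
+! grid, floor(pos)+1 the left coarse node and get_weight(pos-floor(pos)) the
+! stencil_size interpolation weights.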
+
+
+function weight_Mprime4(pos) result(weight)
+
+  real(WP), intent(in)              :: pos
+  real(WP), dimension(stencil_size) :: weight
+
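+  ! Weights evaluated with Horner's rule; the original closed forms are kept
+  ! as comments. At pos = 0 the weights reduce to (0, 1, 0, 0), so the M'4
+  ! scheme reproduces coarse grid values exactly; weight(2) is obtained as
+  ! the complement so that the four weights always sum to 1.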
+  !weight(1)  = ((2.-(pos+1.))**2 * (1.-(pos+1.)))/2.
+  weight(1) = (pos * (pos * (-pos + 2.) - 1.)) / 2.
+  !weight(3) = 1.-2.5*(1.-pos)**2 + 1.5*(1.-pos)**3
+  weight(3) = (pos * (pos * (-3. * pos + 4.) + 1.)) / 2.
+  !weight(4) = ((2.-(2.-pos))**2 * (1.-(2.-pos)))/2.
+  weight(4) = (pos * pos * (pos - 1.)) / 2.
+  !weight(2) = 1.- 2.5*pos**2 + 1.5*pos**3
+  weight(2) = 1. - (weight(1)+weight(3)+weight(4))
+
+end function weight_Mprime4
+
+
+function weight_M4(pos) result(weight)
+
+  real(WP), intent(in)              :: pos
+  real(WP), dimension(stencil_size) :: weight
+
+  ! M4 (cubic B-spline) kernel, with X the absolute distance in grid units:
+  !   (1/6)*(2-X)**3                     if 1 <= X < 2
+  !   (1/6)*(2-X)**3 - (4/6)*(1-X)**3    if X < 1
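+  ! Note: the cubic B-spline M4 is a smoothing kernel: at pos = 0 the weights
+  ! are (1/6, 2/3, 1/6, 0), so coarse grid values are not reproduced exactly
+  ! (unlike the interpolating M'4 and Lambda_4,4 kernels).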
+
+  !weight(1) = (1._WP/6._WP)*((-(pos+1)+2)**3)
+  weight(1) = (1._WP/6._WP)*((-pos+1._WP)**3)
+  !weight(2) = (1._WP/6._WP)*((-pos+2)**3) - (4._WP/6._WP)*((-pos+1)**3)
+  !weight(3) = (1._WP/6._WP)*((-(1-pos)+2)**3) - (4._WP/6._WP)*((-(1-pos)+1)**3)
+  weight(3) = (1._WP/6._WP)*((pos+1)**3) - (4._WP/6._WP)*(pos**3)
+  weight(4) = (1._WP/6._WP)*(pos**3)
+  weight(2) = 1. - (weight(1)+weight(3)+weight(4))
+
+end function weight_M4
+
+
+function weight_Lambda4_4(pos) result(weight)
+
+  real(WP), intent(in)              :: pos
+  real(WP), dimension(stencil_size) :: weight
+
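+    ! Degree-9 polynomial pieces of the Lambda_4,4 kernel, evaluated with
+    ! Horner's rule; weight(5) is computed as the complement of the others so
+    ! that the six weights sum to 1 (its closed form is kept as a comment).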
+    weight(1) = (pos*(pos*(pos*(pos*(pos*(pos*(pos*(pos*(-46. * pos + 207.) - 354.) + 273.) - 80.) + 1.) - 2.)- 1.) + 2.)) / 24.
+    weight(2) = (pos*(pos*(pos*(pos*(pos*(pos*(pos*(pos*(230. * pos - 1035.) +1770.) - 1365.) + 400.) - 4.) + 4.) + 16.) - 16.)) / 24.
+    weight(3) = (pos*pos*(pos*pos*(pos*(pos*(pos*(pos*(-460.*pos + 2070.) - 3540.) + 2730.) - 800.) + 6.) - 30.) + 24.) / 24.
+    weight(4) = (pos*(pos*(pos*(pos*(pos*(pos*(pos*(pos*(460. * pos - 2070.) + 3540.) - 2730.) + 800.) - 4.) - 4.) + 16.) + 16.)) / 24.
+    !weight(5) = (pos*(pos*(pos*(pos*(pos*(pos*(pos*(pos * (-230. * pos + 1035.) - 1770.) + 1365.) - 400.) + 1.) + 2.) - 1.) - 2.)) / 24.
+    weight(6) = (pos*pos*pos*pos*pos*(pos*(pos * (pos * (46. * pos - 207.) + 354.) - 273.) + 80.)) / 24.
+    weight(5) = 1. - (weight(1)+weight(2)+weight(3)+weight(4)+weight(6))
+
+end function weight_Lambda4_4
+
+
+end module Interpolation_velo
+!> @}
diff --git a/HySoP/src/scalesInterface/precision_tools.f90 b/HySoP/src/scalesInterface/precision_tools.f90
index e5880487b..2818f9ad9 100644
--- a/HySoP/src/scalesInterface/precision_tools.f90
+++ b/HySoP/src/scalesInterface/precision_tools.f90
@@ -1,4 +1,11 @@
-!> @addtogroup toolbox 
+!USEFORTEST toolbox
+!USEFORTEST postprocess
+!USEFORTEST advec
+!USEFORTEST io
+!USEFORTEST topo
+!USEFORTEST avgcond
+!USEFORTEST interpolation
+!> @addtogroup toolbox
 !! @{
 !------------------------------------------------------------------------------
 !
@@ -7,7 +14,7 @@
 !> @author
 !> Guillaume Balarac, LEGI
 !
-! DESCRIPTION: 
+! DESCRIPTION:
 !> The aim of this module is set some parameters to fix the working data
 !> representation in the code. It is set to double precision for REAL.
 !------------------------------------------------------------------------------
-- 
GitLab