From 32db6e6ea28dcb9daf1b4d4042a825e79291736b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Franck=20P=C3=A9rignon?= <franck.perignon@imag.fr>
Date: Fri, 8 Jun 2012 07:13:45 +0000
Subject: [PATCH] Reorg and merge Parmes/scales

---
 HySoP/src/Domain.f90                          |   83 +
 HySoP/src/Fields.f90                          |  143 +
 HySoP/src/{main => }/Penalize.f90             |   11 +-
 HySoP/src/{main => }/SetsIndicators.f90       |  209 +-
 HySoP/src/{main => }/TestFunctions.f90        |  214 +-
 HySoP/src/{main => }/Topology.f90             |  125 +-
 HySoP/src/Unstable/LEGI/CMakeLists.txt        |  207 --
 HySoP/src/{main => }/VectorCalc.f90           |  320 ++-
 HySoP/src/client_data.f90                     |   32 +
 HySoP/src/{main => }/io_vtk.f90               |   49 +-
 HySoP/src/main/Domain.f90                     |   80 -
 HySoP/src/main/Fields.f90                     |   67 -
 HySoP/src/main/NavierStokes2D.f90             |  603 +++++
 HySoP/src/main/Particles.f90                  |  369 ---
 HySoP/src/main/client_data.f90                |   14 -
 HySoP/src/main/main.cxx                       |   37 +-
 HySoP/src/{main => }/parmesTools.f90          |    0
 HySoP/src/poisson/Poisson.f90                 |  301 +++
 HySoP/src/{main => poisson}/Solver.f90        |  143 +-
 HySoP/src/poisson/poisson_init.f90            |  597 +++++
 HySoP/src/ppmInterface/PPMFields.f90          |   80 +
 HySoP/src/ppmInterface/Particles.f90          | 1445 +++++++++++
 HySoP/src/scalesInterface/CMakeLists.txt      |   84 +
 .../src/scalesInterface/layout/cart_mesh.f90  |  127 +
 .../scalesInterface/layout/cart_topology.f90  |  658 +++++
 .../scalesInterface/output/parallel_io.f90    |  433 ++++
 .../output/parallel_io_bin.f90                |  468 ++++
 HySoP/src/scalesInterface/particles/advec.f90 |  151 ++
 .../src/scalesInterface/particles/advecX.f90  |  757 ++++++
 .../src/scalesInterface/particles/advecY.f90  |  645 +++++
 .../src/scalesInterface/particles/advecZ.f90  |  657 +++++
 .../particles/advec_common.f90                | 2292 +++++++++++++++++
 .../particles/advec_common_line.f90           |  534 ++++
 .../particles/advec_remesh_formula.f90        |   61 +
 .../particles/advec_variables.f90             |  175 ++
 HySoP/src/scalesInterface/precision.f90       |   28 +
 HySoP/src/scalesInterface/string.f90          |   18 +
 37 files changed, 11133 insertions(+), 1084 deletions(-)
 create mode 100755 HySoP/src/Domain.f90
 create mode 100755 HySoP/src/Fields.f90
 rename HySoP/src/{main => }/Penalize.f90 (90%)
 rename HySoP/src/{main => }/SetsIndicators.f90 (73%)
 rename HySoP/src/{main => }/TestFunctions.f90 (51%)
 rename HySoP/src/{main => }/Topology.f90 (50%)
 delete mode 100644 HySoP/src/Unstable/LEGI/CMakeLists.txt
 rename HySoP/src/{main => }/VectorCalc.f90 (53%)
 create mode 100755 HySoP/src/client_data.f90
 rename HySoP/src/{main => }/io_vtk.f90 (78%)
 delete mode 100755 HySoP/src/main/Domain.f90
 delete mode 100755 HySoP/src/main/Fields.f90
 create mode 100755 HySoP/src/main/NavierStokes2D.f90
 delete mode 100755 HySoP/src/main/Particles.f90
 delete mode 100755 HySoP/src/main/client_data.f90
 rename HySoP/src/{main => }/parmesTools.f90 (100%)
 create mode 100755 HySoP/src/poisson/Poisson.f90
 rename HySoP/src/{main => poisson}/Solver.f90 (52%)
 create mode 100755 HySoP/src/poisson/poisson_init.f90
 create mode 100755 HySoP/src/ppmInterface/PPMFields.f90
 create mode 100755 HySoP/src/ppmInterface/Particles.f90
 create mode 100644 HySoP/src/scalesInterface/CMakeLists.txt
 create mode 100644 HySoP/src/scalesInterface/layout/cart_mesh.f90
 create mode 100644 HySoP/src/scalesInterface/layout/cart_topology.f90
 create mode 100644 HySoP/src/scalesInterface/output/parallel_io.f90
 create mode 100644 HySoP/src/scalesInterface/output/parallel_io_bin.f90
 create mode 100644 HySoP/src/scalesInterface/particles/advec.f90
 create mode 100644 HySoP/src/scalesInterface/particles/advecX.f90
 create mode 100644 HySoP/src/scalesInterface/particles/advecY.f90
 create mode 100644 HySoP/src/scalesInterface/particles/advecZ.f90
 create mode 100644 HySoP/src/scalesInterface/particles/advec_common.f90
 create mode 100644 HySoP/src/scalesInterface/particles/advec_common_line.f90
 create mode 100644 HySoP/src/scalesInterface/particles/advec_remesh_formula.f90
 create mode 100644 HySoP/src/scalesInterface/particles/advec_variables.f90
 create mode 100644 HySoP/src/scalesInterface/precision.f90
 create mode 100644 HySoP/src/scalesInterface/string.f90

diff --git a/HySoP/src/Domain.f90 b/HySoP/src/Domain.f90
new file mode 100755
index 000000000..d20f09981
--- /dev/null
+++ b/HySoP/src/Domain.f90
@@ -0,0 +1,83 @@
+!> Physical domain definition and its discretisation.
+!! Note FP : Fortran derived types might be better here, but they are avoided on purpose for better f2py compatibility.
+module Domain
+
+  use client_data
+  
+  implicit none
+
+  private
+  public init_geometry, init_grid
+  public physDomainLowerPoint,physDomainUpperPoint,grid_resolution,grid_step,domain_ghostsize,domainLength,domain_bc,gridCells
+
+  !> Physical domain upper limit
+  real(mk), dimension(:), pointer :: physDomainUpperPoint=>NULL()
+  !> Physical domain lower limit
+  real(mk), dimension(:), pointer :: physDomainLowerPoint =>NULL()
+  !> Sizes of the domain
+  real(mk), dimension(:), pointer :: domainLength =>NULL()
+  !> Boundary conditions for the domain
+  integer,  dimension(:), pointer :: domain_bc=>NULL()
+  !> Number of ghost points in each direction
+  integer,  dimension(:), pointer :: domain_ghostsize=>NULL()
+  !> Grid resolution (number of points)
+  integer,  dimension(:), pointer :: grid_resolution =>NULL()
+  !> Number of cells (in each dir) on the grid
+  integer,  dimension(dim3) :: gridCells
+  !> Grid space step sizes
+  real(mk),  dimension(:), pointer :: grid_step =>NULL()
+  
+
+  integer, private :: istat
+
+contains
+  
+  !> Set continuous domain geometry
+  !> \param bc boundary conditions type (ppm way)
+  subroutine init_geometry(bc)
+    
+    !> Boundary conditions type (for ppm)
+    integer, intent(in) :: bc
+    
+    ! Domain size and min/max coords
+    allocate(physDomainLowerPoint(dim3), physDomainUpperPoint(dim3), domainLength(dim3),stat = istat)
+    if(istat.ne.0) stop 'Geometry, allocation failed'
+    physDomainUpperPoint = 0.0
+    physDomainLowerPoint = 0.0
+    physDomainUpperPoint(c_X) = 6.0!2.*pi!!1.0
+    physDomainUpperPoint(c_Y) = 4.0!2.*pi!1.0
+    !physDomainLowerPoint(c_X) = 0.0
+    !physDomainLowerPoint(c_Y) = 0.0
+    ! Boundary conditions and ghosts
+    allocate(domain_bc(2*dime), domain_ghostsize(dime), stat = istat)
+    if(istat.ne.0) stop 'BC, allocation failed'
+    domain_bc = bc
+    domain_ghostsize = 0 ! Warning : It seems that ghost>0 is required for ppm remesh 
+    domainLength = physDomainUpperPoint - physDomainLowerPoint
+
+  end subroutine init_geometry
+
+  !> Set discretisation parameters for the continuous domain.
+  !! Resolution corresponds to the number of points in one direction.
+  !! For periodic boundaries, the point with index 1 is the same as the point with the last index,
+  !! but it must still be present (ppm requirement).
+  subroutine init_grid()
+    
+    allocate(grid_resolution(dim3),grid_step(dim3),stat=istat)
+    if(istat.ne.0) stop 'grid_resolution, allocation failed'
+    grid_resolution = 1
+    grid_resolution(c_X) = 129!2001
+    grid_resolution(c_Y) = 65!1601
+!!$    grid_step(c_X) = (physDomainUpperPoint(c_X) - physDomainLowerPoint(c_X))/(real(grid_resolution(c_X),mk)-1.)
+!!$    grid_step(c_Y) = (physDomainUpperPoint(c_Y) - physDomainLowerPoint(c_Y))/(real(grid_resolution(c_Y),mk)-1.)
+!!$    grid_step(c_Z) = 1.
+!!$    grid_resolution(c_X) = 2001
+!!    grid_resolution = 256
+!!$    grid_resolution(c_Z) = 1
+    gridCells = max(grid_resolution-1,1)
+!!$    gridCells(c_Z) = 1
+    grid_step = (physDomainUpperPoint - physDomainLowerPoint)/gridCells
+    
+  end subroutine init_grid
+
+end module Domain
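
As a quick sanity check of the hard-coded values above, here is a minimal driver sketch
(hypothetical program, not part of the patch; it assumes client_data exposes mk, c_X and c_Y,
and that a valid ppm boundary-condition constant is passed for bc):

    program check_domain
      use client_data, only : mk, c_X, c_Y
      use Domain
      implicit none
      integer, parameter :: bc = 1 ! placeholder: use the ppm periodic BC constant in a real run
      call init_geometry(bc)
      call init_grid()
      ! With the values above: domainLength = (6.0, 4.0), gridCells = (128, 64),
      ! hence grid_step = (6.0/128, 4.0/64) = (0.046875, 0.0625)
      write(*,*) 'grid_step =', grid_step(c_X), grid_step(c_Y)
    end program check_domain
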
diff --git a/HySoP/src/Fields.f90 b/HySoP/src/Fields.f90
new file mode 100755
index 000000000..31a752427
--- /dev/null
+++ b/HySoP/src/Fields.f90
@@ -0,0 +1,143 @@
+!> Declaration, allocation of all the fields on the grid.
+module Fields
+  
+  use client_data
+  use mpi
+  use PPMFields
+
+  implicit none
+
+  !> Velocity 
+  real(mk), dimension(:,:,:,:), pointer :: velocity => NULL()
+  !> Vorticity
+  real(mk), dimension(:,:,:,:), pointer :: vorticity =>NULL()
+  !> Vorticity (a scalar in the 2D case)
+  real(mk), dimension(:,:,:), pointer :: vorticity2D =>NULL()
+  !> Stream function - Test purpose. Useless if the ppm FFT solver works as intended.
+  ! real(mk), dimension(:,:,:,:,:), pointer :: stream_function =>NULL()
+  !> rhs of vorticity eq (i.e. stretch + diffusion terms)
+  real(mk), dimension(:,:,:,:), pointer :: rhs =>NULL()
+  real(mk), dimension(:,:,:), pointer :: gauss =>NULL()
+  !> 
+  !real(mk), dimension(:,:,:,:,:), pointer :: vel_ex => NULL()
+  !> Scalar on the grid, test purpose for chi functions
+  real(mk),dimension(:,:,:),pointer::testFunc=>NULL()
+  !> Scalar on the grid
+  real(mk),dimension(:,:,:),pointer::scalar=>NULL()
+  
+contains
+  
+  !> Fields allocation. 
+  !! Warning : ghost points must be included in the field (i.e. size = "real size" + 2*number of ghost points)
+  subroutine initFields(resolution,ghostsize)
+    
+    !> Required resolution for the fields (without ghosts)
+    integer, dimension(dime), intent(in) :: resolution
+    !> number of ghost points in each direction (ghost(c_X) = 2 means resolution(c_X)+ 4 points for the field)
+    integer, dimension(:),pointer:: ghostsize
+    
+    integer::istat
+    ! Lower and upper bounds for fields
+    integer, dimension(dime) :: ldl, ldu
+    ! nsublist from the ppm topology. Assumed to be equal to 1; see Topology.f90
+
+    ldl = 1 - ghostsize
+    ldu = resolution + ghostsize
+
+
+    if(dime==2) then
+       call initPPMFields2D(resolution,ghostsize)
+       velocity => PPMvelocity2D
+       vorticity2D => PPMvorticity2D
+       scalar => PPMscalar2D
+       allocate(gauss(ldl(c_X):ldu(c_X),ldl(c_Y):ldu(c_Y),1), stat = istat)
+       allocate(rhs(dime,ldl(c_X):ldu(c_X),ldl(c_Y):ldu(c_Y),1), stat = istat)
+       
+    else if(dime == 3) then
+       call initPPMFields3D(resolution,ghostsize)
+       velocity => PPMvelocity3D(:,:,:,:,1)
+       vorticity => PPMvorticity3D(:,:,:,:,1)
+       rhs => PPMrhs3D(:,:,:,:,1)
+       allocate(gauss(ldl(c_X):ldu(c_X),ldl(c_Y):ldu(c_Y),ldl(c_Z):ldu(c_Z)), stat = istat)
+    end if
+
+    allocate(testFunc(resolution(c_X),resolution(c_Y),resolution(c_Z)))
+
+
+!!$    ! Velocity ...
+!!$    allocate(velocity(dime,ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3),nsublist),stat = istat)
+!!$    if(istat.ne.0) stop 'Field allocation error for velocity'
+!!$    ! Vorticity
+!!$    allocate(vorticity(dime,ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3),nsublist), stat = istat)
+!!$    if(istat.ne.0) stop 'Field allocation error for vorticity'
+!!$    ! rhs
+!!$    allocate(rhs(dime,ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3),nsublist), stat = istat)
+ 
+!!$    if(istat.ne.0) stop 'Field allocation error for rhs'
+!!$    
+!!$    !allocate(stream_function(dime,ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3),nsublist), stat = istat)
+!!$    !if(istat.ne.0) stop 'stream_function allocation error for rhs'
+    
+!!    allocate(testFunc(ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3)), stat = istat)
+ !!   if(istat.ne.0) stop 'Field allocation error for testFunc'
+    ! Scalar
+    !allocate(scalar(ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3),nsublist), stat = istat)
+    !if(istat.ne.0) stop 'Field allocation error for scalar'
+  end subroutine initFields
+
+  !> compute the size of the memory used to save fields
+  function getMemoryUsedForFields()
+    real(mk) :: getMemoryUsedForFields 
+    getMemoryUsedForFields = sizeof(velocity)+sizeof(vorticity)+sizeof(rhs)+sizeof(testfunc)+sizeof(scalar)
+    getMemoryUsedForFields = getMemoryUsedForFields*1.e-6
+    if(verbose) then
+       write(*,'(a,i3,a,f10.4,a)') &
+            '[',rank,'] Fields have been initialized.  Memory used :', getMemoryUsedForFields, ' MB.'
+    end if
+  end function getMemoryUsedForFields
+
+  !> Shift the velocity x component according to a required flow rate
+  !> \f[ velocity_x = velocity_x + shift \f]
+  !> with
+  !> \f[shift = \frac{reqFlowRate-currentFlowRate}{S_x} \f]
+  !> \f$S_x\f$ being the surface where x=xmin (incoming flow)
+  subroutine shiftVelocityX(reqFlowRate,step,resolution,surf,coordMin,lower)
+    !> Required flow rate
+    real(mk), intent(in) :: reqFlowRate
+    !> Grid step
+    real(mk), dimension(dime), intent(in) :: step
+    !> local resolution
+    integer,dimension(dime),intent(in) :: resolution
+    !> Area of the surface for integration 
+    real(mk) ,intent(in) :: surf
+    !> Coordinates of the minimal point of the current domain
+    real(mk),dimension(dime),intent(in) :: coordMin
+    !> lower bound of the physical domain
+    real(mk),dimension(dime),intent(in) :: lower
+
+    real(mk) :: localShift,globalShift
+    integer :: info
+    localShift = 0 
+    
+    ! We compute the current flow rate through surface x = xmin, for the x component of the velocity : 
+    ! FlowRate = sum(surf x=xmin)( velocity(c_X) ) * step(c_Z)*step(c_Y)
+    ! And use this value to shift the velocity(c_X) to have :
+    ! RequiredFlowRate = FlowRate + Surf(x=xmin)*globalShift
+ 
+    ! Step 1 : compute flowRate
+    if(abs(coordMin(c_X)-lower(c_X)) <= 2.*epsilon(globalShift) ) then
+       ! Compute mean value of the velocity through a face ortho. to x dir.
+       ! Warning: ghost points must be excluded
+       localShift = sum(velocity(c_X,1,1:resolution(c_Y)-1,1:resolution(c_Z)-1))
+    end if
+    ! Step 2 : reduction over all MPI processes ...
+    call MPI_ALLReduce(localShift,globalShift,1,MPI_DOUBLE_PRECISION,MPI_SUM,MPI_COMM_WORLD,info)
+    ! Step 3 : set global shift
+    globalShift = (reqFlowRate-globalShift*step(c_Y)*step(c_Z))/surf 
+    
+    ! Step 4 : update velocity(c_X)
+    velocity(c_X,:,:,:) = velocity(c_X,:,:,:) + globalShift
+
+  end subroutine shiftVelocityX
+
+end module Fields
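
The correction in shiftVelocityX amounts to a simple flow-rate balance. A serial,
single-process sketch of the arithmetic (hypothetical values; a uniform inflow u0 is
assumed, so the discrete sum over the x = xmin face reduces to u0*surf):

    program check_flowrate_shift
      implicit none
      integer, parameter :: mk = kind(1.0d0) ! assumption: double precision, as in client_data
      real(mk) :: u0, surf, flowRate, reqFlowRate, shift
      u0 = 0.8_mk                ! current (uniform) x-velocity through the x = xmin face
      surf = 4.0_mk*1.0_mk       ! face area S_x = length_y*length_z (example values)
      flowRate = u0*surf         ! what sum(velocity(c_X,1,:,:))*step(c_Y)*step(c_Z) gives here
      reqFlowRate = 1.0_mk*surf  ! target flow rate
      shift = (reqFlowRate - flowRate)/surf
      write(*,*) 'shift =', shift ! 0.2; added to velocity(c_X,:,:,:), it restores the target rate
    end program check_flowrate_shift
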
diff --git a/HySoP/src/main/Penalize.f90 b/HySoP/src/Penalize.f90
similarity index 90%
rename from HySoP/src/main/Penalize.f90
rename to HySoP/src/Penalize.f90
index c52c37779..6aaf75912 100755
--- a/HySoP/src/main/Penalize.f90
+++ b/HySoP/src/Penalize.f90
@@ -1,5 +1,4 @@
 !> Penalization stuff (init chi, penalize vorticity)
-!! Note : drag/lift are also computed with penalize routines. 
 module penalisation
 
   use client_data
@@ -36,20 +35,20 @@ contains
     !localDiagnostics=0.0
 
     coef=1./(1.+dt*lambda)
-
+    
     !do k=1,size(chi_boundary,2)
     forall(k=1:size(chi_boundary,2))
        vel(:,chi_boundary(1,k),chi_boundary(2,k),chi_boundary(3,k),:)=&
             coef*vel(:,chi_boundary(1,k),chi_boundary(2,k),chi_boundary(3,k),:)
     end forall!do
     !forall(k=1:size(chi_sphere,2))
-    !do k=1,size(chi_sphere,2)
-     !  vel(:,chi_sphere(1,k),chi_sphere(2,k),chi_sphere(3,k),:)=&
-      !      coef*vel(:,chi_sphere(1,k),chi_sphere(2,k),chi_sphere(3,k),:)
+       do k=1,size(chi_sphere,2)
+       vel(:,chi_sphere(1,k),chi_sphere(2,k),chi_sphere(3,k),:)=&
+            coef*vel(:,chi_sphere(1,k),chi_sphere(2,k),chi_sphere(3,k),:)
        ! if( all(chi_sphere(:,k) < resolution(:))) then 
        !   localDiagnostics=localDiagnostics + vel(:,chi_sphere(1,k),chi_sphere(2,k),chi_sphere(3,k),1)
        ! end if
-    !end do
+    end do
     !end forall
 
   end subroutine penalise_velocity
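
One way to read the coefficient coef = 1/(1 + dt*lambda) used above: it is the implicit
(backward Euler) update of the penalization term inside an obstacle,

    u^{n+1} = u^n - lambda*dt*u^{n+1}   =>   u^{n+1} = u^n/(1 + lambda*dt)

which remains stable even for large penalization factors lambda.
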
diff --git a/HySoP/src/main/SetsIndicators.f90 b/HySoP/src/SetsIndicators.f90
similarity index 73%
rename from HySoP/src/main/SetsIndicators.f90
rename to HySoP/src/SetsIndicators.f90
index 6f1f6a612..d2b97b96d 100755
--- a/HySoP/src/main/SetsIndicators.f90
+++ b/HySoP/src/SetsIndicators.f90
@@ -4,13 +4,14 @@ module SetsIndicators
 
   use client_data
   use VectorCalculus
+  use client_topology, only : nsublist
   use mpi,only:MPI_DOUBLE_PRECISION,MPI_SUM,MPI_COMM_WORLD
-  
+  use Domain
   implicit none
 
   private
 
-  public :: init_boundary_layer,compute_control_box,compute_test,nocaForces,laplacian,chi_sphere,chi_boundary,chi_box,&
+  public :: init_obstacles,compute_control_box,compute_test,nocaForces,laplacian,chi_sphere,chi_boundary,chi_box,&
        getMemoryForIndicators
 
   ! Indicators functions 
@@ -35,56 +36,80 @@ module SetsIndicators
   !> downstream boundary ind. func (ie xmin)
   integer, dimension(:,:), pointer :: chi_down => NULL() ! downstream 
   !> 
-  integer,parameter :: Down=1,Up=2,East=3,West=4,South=5,North=6
+  integer,parameter :: Down=1,Up=2,West=3,East=4,South=5,North=6
   !> Normal to each face of the control volume
   real(mk),dimension(dime,2*dime) :: normal
   !> A buffer for force on control volume, used to save force value on previous time step
   real(mk),dimension(dime)::bufferForce
   !> normalisation factor used to compute the drag in the Noca's way
   real(mk) :: coef
-  real(mk),parameter :: uinf=1
+  real(mk),parameter :: uinf=1.0
 
 contains
 
   !> compute chi functions for penalization: boundary layers (dirichlet conditions on zmin and zmax) and sphere obstacle
-  subroutine init_boundary_layer(resolution,step,lower,upper,coordMin)
+  subroutine init_obstacles(resolution,step,lower,upper,center,radius,layer,coordMin)
     !> Number of points in each dir 
     integer, dimension(dime), intent(in)  :: resolution
     !> Grid steps sizes
     real(mk), dimension(dime), intent(in) :: step
     !> Dimensions of the boundary layers
     real(mk), dimension(dime), intent(in) :: upper,lower
-    !> Lower point of the domain
+    !> position of the center of the sphere
+    real(mk),dimension(dime),intent(in):: center
+    !> Radius of the sphere
+    real(mk),intent(in) :: radius
+    !> Boundary layer thickness
+    real(mk), intent(in) :: layer
+    !> Coordinates of the lowest point of the local domain
     real(mk),dimension(dime),intent(in) :: coordMin
-    integer, dimension(:,:), allocatable :: tmp_boundary
+
+    integer, dimension(:,:), allocatable :: tmp_boundary,tmp_sphere
     integer::istat,i,j,k
-    
-    ! z position
-    real(mk) :: z
-    integer :: sizeMaxChi,count_boundary
-    
+    real(mk),dimension(dime) :: coords
+    real(mk) :: dist
+    integer :: sizeMaxChi,count_boundary,count_sphere
+    real(mk) :: layerMin,layerMax
     sizeMaxChi = product(resolution)
     allocate(tmp_boundary(dime,sizeMaxChi),stat=istat)
+    allocate(tmp_sphere(dime,sizeMaxChi),stat=istat)
+    
+    layerMin = lower(c_Z) + layer
+    layerMax = upper(c_Z) - layer
+
     if(istat.ne.0) stop 'Chi-boundaries function allocation error.'
     count_boundary=0
+    count_sphere =0
     do k=1,resolution(c_Z)
-       z=coordMin(c_Z) + (k-1)*step(c_Z)
-       if( (z>upper(c_Z)).or.(z<lower(c_Z))) then
-          do j=1,resolution(c_Y)
-             do i=1,resolution(c_X)
+       coords(c_Z)=coordMin(c_Z) + (k-1)*step(c_Z)
+       do j=1,resolution(c_Y)
+          coords(c_Y)=coordMin(c_Y) + (j-1)*step(c_Y)
+          do i=1,resolution(c_X)
+             coords(c_X)=coordMin(c_X) + (i-1)*step(c_X)
+             if( (coords(c_Z)>layerMax).or.(coords(c_Z)<layerMin)) then
                 count_boundary=count_boundary+1
                 tmp_boundary(c_X,count_boundary)=i
                 tmp_boundary(c_Y,count_boundary)=j
                 tmp_boundary(c_Z,count_boundary)=k
-             end do
+             end if
+             dist = dot_product(coords-center,coords-center) - radius**2
+             if(dist <=0.0) then ! We are on or in the sphere ...
+                count_sphere=count_sphere+1 
+                tmp_sphere(c_X,count_sphere)=i
+                tmp_sphere(c_Y,count_sphere)=j
+                tmp_sphere(c_Z,count_sphere)=k
+             end if
           end do
-       end if
+       end do
     end do
     allocate(chi_boundary(dime,count_boundary))
     chi_boundary=tmp_boundary(:,1:count_boundary)
-    deallocate(tmp_boundary)
+    allocate(chi_sphere(dime,count_sphere))
+    chi_sphere=tmp_sphere(:,1:count_sphere)
+    
+    deallocate(tmp_boundary,tmp_sphere)
     
-  end subroutine init_boundary_layer
+  end subroutine init_obstacles
 
   !> Compute indicator functions for the control box (including a sphere ...)
   ! This routine must fill all chi functions in
@@ -115,28 +140,33 @@ contains
     ! Size (number of points in each dir) of the control volume
     integer(kind=8), dimension(dime) :: boxDim
     real(mk) :: coord,dist
-    integer :: count,i,j,k,direction,count_sphere,count_box
+    integer :: count,i,j,k,direction,count_box
     integer(kind=8) :: nbPointsBox
-    integer,dimension(:,:),allocatable :: tmp_sphere,tmp_box
+    integer,dimension(:,:),allocatable :: tmp_box
 
+    !> radius of the sphere
+    real(mk) :: radiusBis
+
+    ! Add an assert to check that the sphere radius is shorter than the box size ...
+    radiusBis=radius!-step(c_Z)
     ! First compute normals to the box boundaries
     normal(:,:)=0.0
-    normal(1,Up)=1.0
-    normal(1,Down)=-1.0
-    normal(2,East)=-1.0
-    normal(2,West)=1.0
-    normal(3,North)=1.0
-    normal(3,South)=-1.0
+    normal(c_X,Up)=1.0
+    normal(c_X,Down)=-1.0
+    normal(c_Y,West)=-1.0
+    normal(c_Y,East)=1.0
+    normal(c_Z,North)=1.0
+    normal(c_Z,South)=-1.0
     ! Coordinates of the upper point 
     coordMax(:)=coordMin(:)+(resolution(:)-1)*step(:) 
-    !! First : check if box boundaries are in the current domain
+    ! First : check if box boundaries are in the current domain
     isMaxIn=.False.
     isMinIn=.False.
     where(coordMin <= boxMin) isMinIn=.True.  ! Lower boundaries
     where(coordMax >= boxMax) isMaxIn=.True.  ! Upper boundaries
 
     !! Look for local indices corresponding to the box boundaries (i.e. x,y,z = boxMin() and boxMax())
-    ! ind order: Down,Up,East,West,South,North
+    ! index order: Down,Up,West,East,South,North
     ind=0 !
     where(isMinIn(:).and.isMaxIn(:)) 
        ind(1:2*dime:2)=1
@@ -166,43 +196,40 @@ contains
           end if
        end do
     end do
+    ! ind now contains the indices of the points lying on the box boundary, in each direction, in the following order :
+    ! ind(Down Up West East South North)
+
     ! Remove last point to integrate properly ...
     ind(2:2*dime:2)=ind(2:2*dime:2)-1
-
+    
     ! Count the number of points on each face and inside the domain
     nbPoints=0
-    if(isMinIn(1)) nbPoints(Down)=(ind(West)-ind(East)+1)*(ind(North)-ind(South)+1)
-    if(isMaxIn(1))   nbPoints(Up)=(ind(West)-ind(East)+1)*(ind(North)-ind(South)+1)
-    if(isMinIn(3)) nbPoints(South)=(ind(West)-ind(East)+1)*(ind(Up)-ind(Down)+1)
-    if(isMaxIn(3)) nbPoints(North)=(ind(West)-ind(East)+1)*(ind(Up)-ind(Down)+1)
+    if(isMinIn(1)) nbPoints(Down)=(ind(East)-ind(West)+1)*(ind(North)-ind(South)+1)
+    if(isMaxIn(1))   nbPoints(Up)=(ind(East)-ind(West)+1)*(ind(North)-ind(South)+1)
+    if(isMinIn(3)) nbPoints(South)=(ind(East)-ind(West)+1)*(ind(Up)-ind(Down)+1)
+    if(isMaxIn(3)) nbPoints(North)=(ind(East)-ind(West)+1)*(ind(Up)-ind(Down)+1)
     if(isMinIn(2))  nbPoints(East)=(ind(Up)-ind(Down)+1)*(ind(North)-ind(South)+1)
     if(isMaxIn(2))  nbPoints(West)=(ind(Up)-ind(Down)+1)*(ind(North)-ind(South)+1)
 
     boxDim(c_X)=ind(Up)-ind(Down)+1
-    boxDim(c_Y)=ind(West)-ind(East)+1
+    boxDim(c_Y)=ind(East)-ind(West)+1
     boxDim(c_Z)=ind(North)-ind(South)+1
     nbPointsBox = boxDim(c_X)*boxDim(c_Y)*boxDim(c_Z)
     
-    allocate(tmp_box(dime,nbPointsBox),tmp_sphere(dime,nbPointsBox))
+    allocate(tmp_box(dime,nbPointsBox))
     allocate(chi_up(dime,nbPoints(Up)),chi_down(dime,nbPoints(Down)),chi_east(dime,nbPoints(East)),chi_west(dime,nbPoints(West)))
     allocate(chi_south(dime,nbPoints(South)),chi_north(dime,nbPoints(North)))
     count_box=0
-    count_sphere=0
 
     if(all(boxDim>0)) then
        do k=ind(South),ind(North)
           coords(c_Z) = coordMin(c_Z)+(k-1)*step(c_Z)
-          do j=ind(East),ind(West)
+          do j=ind(West),ind(East)
              coords(c_Y) = coordMin(c_Y)+(j-1)*step(c_Y)
              do i=ind(Down),ind(Up)
                 coords(c_X) = coordMin(c_X)+(i-1)*step(c_X)
-                dist = dot_product(coords-center,coords-center) - radius**2
-                if(dist <=0.0) then ! We are on or in the sphere ...
-                   count_sphere=count_sphere+1 
-                   tmp_sphere(c_X,count_sphere)=i
-                   tmp_sphere(c_Y,count_sphere)=j
-                   tmp_sphere(c_Z,count_sphere)=k
-                else ! we are in the control volume
+                dist = dot_product(coords-center,coords-center) - radiusBis**2
+                if(dist >=0.0) then ! We are on or outside the sphere ...
                    count_box = count_box+1
                    tmp_box(c_X,count_box)=i
                    tmp_box(c_Y,count_box)=j
@@ -212,15 +239,14 @@ contains
           end do
        end do
     end if
-
-    allocate(chi_sphere(dime,count_sphere),chi_box(dime,count_box))
-    chi_sphere=tmp_sphere(:,1:count_sphere)
+    allocate(chi_box(dime,count_box))
     chi_box=tmp_box(:,1:count_box)
-    deallocate(tmp_box,tmp_sphere)
+    
+    deallocate(tmp_box)
     if(isMinIn(3)) then ! South boundary
        count=1
        chi_south(3,:)=ind(South)
-       do j=ind(East),ind(West)
+       do j=ind(West),ind(East)
           do i=ind(Down),ind(Up)
              chi_south(1,count)=i
              chi_south(2,count)=j
@@ -230,7 +256,7 @@ contains
     end if
     if(isMinIn(2)) then ! East boundary
        count=1
-       chi_east(2,:)=ind(East)
+       chi_east(2,:)=ind(East)+1
        do k=ind(South),ind(North)
           do i=ind(Down),ind(Up)
              chi_east(1,count)=i
@@ -243,7 +269,7 @@ contains
        count=1
        chi_down(1,:)=ind(Down)
        do k=ind(South),ind(North)
-          do j=ind(East),ind(West)
+          do j=ind(West),ind(East)
              chi_down(2,count)=j
              chi_down(3,count)=k
              count=count+1
@@ -253,8 +279,8 @@ contains
 
     if(isMaxIn(3)) then ! North boundary is in the domain
        count=1
-       chi_north(3,:)=ind(North)
-       do j=ind(East),ind(West)
+       chi_north(3,:)=ind(North)+1
+       do j=ind(West),ind(East)
           do i=ind(Down),ind(Up)
              chi_north(1,count)=i
              chi_north(2,count)=j
@@ -275,9 +301,9 @@ contains
     end if
     if(isMaxIn(1)) then ! Upstream boundary is in the domain
        count=1
-       chi_up(1,:)=ind(Up)
+       chi_up(1,:)=ind(Up)+1
        do k=ind(South),ind(North)
-          do j=ind(East),ind(West)
+          do j=ind(West),ind(East)
              chi_up(2,count)=j
              chi_up(3,count)=k
              count=count+1
@@ -285,12 +311,12 @@ contains
        end do
     end if
 
-    bufferForce = 0.0
-    
+    bufferForce = 0.0    
     ! Compute coef used to calculate the drag in Noca's way
-    coef = 1./(0.5*uinf**2*pi*radius**2)
+    coef = 2./(uinf**2*pi*radius**2)
 
   end subroutine compute_control_box
+  
 
   !> Set input field to one on the control volume boundaries and to zero elsewhere 
   subroutine compute_test(testfield,chi)
@@ -309,7 +335,7 @@ contains
     !> The force to be computed
     real(mk), dimension(dime),intent(inout) :: force
     !! velocity and vorticity fields, intent(in)
-    real(mk), dimension(:,:,:,:,:),intent(in),pointer :: velo,vort
+    real(mk), dimension(:,:,:,:,:),pointer :: velo,vort
     !> viscosity
     real(mk),intent(in)::nu
     !! Coordinates of the lowest point in the domain
@@ -329,20 +355,21 @@ contains
 
     ! Downstream and upstream surface
     dsurf=step(c_Y)*step(c_Z)
-    !call integrateOnSurface(localForce,velo,vort,chi_down,normal(:,Down),c_X,nu,dsurf,coordMin,step)
-    !call integrateOnSurface(localForce,velo,vort,chi_up,normal(:,Up),c_X,nu,dsurf,coordMin,step)
+    call integrateOnSurface(localForce,velo,vort,chi_down,normal(:,Down),c_X,nu,dsurf,coordMin,step)
+    call integrateOnSurface(localForce,velo,vort,chi_up,normal(:,Up),c_X,nu,dsurf,coordMin,step)
     ! East and West 
     dsurf=step(c_X)*step(c_Z)
-    !call integrateOnSurface(localForce,velo,vort,chi_east,normal(:,East),c_Y,nu,dsurf,coordMin,step)
-    !call integrateOnSurface(localForce,velo,vort,chi_west,normal(:,West),c_Y,nu,dsurf,coordMin,step)
+    call integrateOnSurface(localForce,velo,vort,chi_east,normal(:,East),c_Y,nu,dsurf,coordMin,step)
+    call integrateOnSurface(localForce,velo,vort,chi_west,normal(:,West),c_Y,nu,dsurf,coordMin,step)
     ! North and south
     dsurf=step(c_Y)*step(c_X)
-    !call integrateOnSurface(localForce,velo,vort,chi_south,normal(:,South),c_Z,nu,dsurf,coordMin,step)
+    call integrateOnSurface(localForce,velo,vort,chi_south,normal(:,South),c_Z,nu,dsurf,coordMin,step)
     call integrateOnSurface(localForce,velo,vort,chi_north,normal(:,North),c_Z,nu,dsurf,coordMin,step)
     ! over the volume ...
-    !call integrateOnBox(localForce,vort,chi_box,dvol,dt,coordMin,step)
+    call integrateOnBox(localForce,vort,chi_box,dvol,dt,coordMin,step)
+
     localForce=localForce*coef
-    write(*,'(a,3f10.5)') ' drag local: ', localForce
+    !write(*,'(a,3f10.5)') ' drag local: ', localForce
     call MPI_Reduce(localForce,force,dime,MPI_DOUBLE_PRECISION,MPI_SUM,0,MPI_COMM_WORLD,info)
 
   end subroutine nocaForces
@@ -367,11 +394,9 @@ contains
     ! coordinates of the current point
     real(mk),dimension(dime) :: coords,int1
     ! local indices
-    integer :: i,j,k,ind,nbSubs
+    integer :: i,j,k,ind
     real(mk)::fact
-    
-    nbSubs=1
-    fact =-dvol/(dt*(dime-1))
+    fact = -dvol/((dime-1)*dt)
     int1=0.0
     !! For all points in the box ...
     do ind=1,size(chi,2)
@@ -381,11 +406,10 @@ contains
        ! coordinates of the current point
        coords = coordMin + (chi(:,ind)-1)*step
        !! part1 of the force
-       int1=int1+cross_prod(coords,vort(:,i,j,k,nbSubs))
+       int1=int1+cross_prod(coords,vort(:,i,j,k,nsublist))
     end do
     force = force + fact*(int1-bufferForce)
     bufferForce = int1 ! Save for next time step ...
-    
   end subroutine integrateOnBox
 
   !> Compute integrals on surface to calculate forces acting on the body.
@@ -417,34 +441,29 @@ contains
     !! local coordinates
     real(mk), dimension(dime) :: coords
     real(mk), dimension(dime) :: int1,int2,nDivT,diff_dir,buffer
-    integer :: nbSubs
     
-    print *, rank, force
-
     fact = 1./(dime-1)
     ! For each point of the current plane ...
     int1=0.0
     int2=0.0
-    nbSubs=1
     diff_dir=0.0
-
     do ind=1,size(chi,2)
        i=chi(1,ind)
        j=chi(2,ind)
        k=chi(3,ind)
        !! part1 = 1/2(velocity.velocity)n - (n.velocity)velocity - 1/(dime-1)((n.velocity)(coord X vorticity) + (n.vorticity)(coord X velocity))
        ! 0.5*velocity.velocity
-       u_u=0.5*dot_product(velo(:,i,j,k,nbSubs),velo(:,i,j,k,nbSubs))
+       u_u=dot_product(velo(:,i,j,k,nsublist),velo(:,i,j,k,nsublist))
        ! normal.velocity
-       n_u=dot_product(velo(:,i,j,k,nbSubs),NormalVec(:))
+       n_u=dot_product(velo(:,i,j,k,nsublist),NormalVec(:))
        ! normal.vorticity
-       n_w=dot_product(vort(:,i,j,k,nbSubs),NormalVec(:))
+       n_w=dot_product(vort(:,i,j,k,nsublist),NormalVec(:))
        ! coordinates of the current point
        coords = coordMin + (chi(:,ind)-1)*step
        !! part1 of the force
-       int1=int1+u_u*NormalVec(:)-n_u*velo(:,i,j,k,nbSubs)&
-            - fact*n_u*cross_prod(coords,vort(:,i,j,k,nbSubs))&
-            + fact*n_w*cross_prod(coords,velo(:,i,j,k,nbSubs))
+       int1=int1+0.5*u_u*NormalVec(:)-n_u*velo(:,i,j,k,nsublist)&
+            - fact*n_u*cross_prod(coords,vort(:,i,j,k,nsublist))&
+            + fact*n_w*cross_prod(coords,velo(:,i,j,k,nsublist))
 
        !! part 2 of the force, the one concerning T = nu(nabla u + nabla uT)
        !! Considering that the box is a parallelepiped, each normal is equal to something like 1 0 0 
@@ -454,19 +473,19 @@ contains
        nDivT = 0.0 ! n X nabla.T 
 
        if(direction==c_X) then ! face = Down or Up, d/dx
-          diff_dir = diffX(velo,i,j,k,step(direction),nbSubs)
-          nDivT(2) = -laplacian(velo,c_Z,i,j,k,step,nbSubs) ! Laplacian of velocity_y
-          nDivT(3) = laplacian(velo,c_Y,i,j,k,step,nbSubs) ! Laplacian of velocity_z
+          diff_dir = diffX(velo,i,j,k,step(direction),nsublist)
+          nDivT(2) = -laplacian(velo,c_Z,i,j,k,step,nsublist) ! Laplacian of velocity_y
+          nDivT(3) = laplacian(velo,c_Y,i,j,k,step,nsublist) ! Laplacian of velocity_z
        else if(direction==c_Y) then ! face = East or West, d/dy
-          diff_dir = diffY(velo,i,j,k,step(direction),nbSubs) 
-          nDivT(3)=-laplacian(velo,c_X,i,j,k,step,nbSubs) ! Laplacian of velocity_x
-          nDivT(1)=laplacian(velo,c_Z,i,j,k,step,nbSubs) ! Laplacian of velocity_z
+          diff_dir = diffY(velo,i,j,k,step(direction),nsublist) 
+          nDivT(3)=-laplacian(velo,c_X,i,j,k,step,nsublist) ! Laplacian of velocity_x
+          nDivT(1)=laplacian(velo,c_Z,i,j,k,step,nsublist) ! Laplacian of velocity_z
        else if(direction==c_Z) then ! face = North or South, d/dz
-          diff_dir = diffZ(velo,i,j,k,step(direction),nbSubs)
-          nDivT(2)=laplacian(velo,c_X,i,j,k,step,nbSubs) ! Laplacian of velocity_x
-          nDivT(1)=-laplacian(velo,c_Y,i,j,k,step,nbSubs) ! Laplacian of velocity_y
+          diff_dir = diffZ(velo,i,j,k,step(direction),nsublist)
+          nDivT(2)=laplacian(velo,c_X,i,j,k,step,nsublist) ! Laplacian of velocity_x
+          nDivT(1)=-laplacian(velo,c_Y,i,j,k,step,nsublist) ! Laplacian of velocity_y
        end if
-       buffer=nabla(velo,direction,i,j,k,step,nbSubs) + diff_dir + fact*cross_prod(coords,nDivT)
+       buffer=nabla(velo,direction,i,j,k,step,nsublist) + diff_dir + fact*cross_prod(coords,nDivT)
        buffer=NormalVec(direction)*nu*buffer
        int2=int2+buffer
 
diff --git a/HySoP/src/main/TestFunctions.f90 b/HySoP/src/TestFunctions.f90
similarity index 51%
rename from HySoP/src/main/TestFunctions.f90
rename to HySoP/src/TestFunctions.f90
index fad4a862e..813bfbc98 100755
--- a/HySoP/src/main/TestFunctions.f90
+++ b/HySoP/src/TestFunctions.f90
@@ -22,11 +22,11 @@ contains
     !> Coordinates of the local minimum point
     real(mk),dimension(dime),intent(in) :: coordMin
     ! rhs function values on the grid (i.e. -omega_ex)
-    real(mk), dimension(:,:,:,:,:), pointer :: rhs_ex
+    real(mk), dimension(:,:,:,:), pointer :: rhs_ex
     ! velocity function values on the grid
-    real(mk), dimension(:,:,:,:,:), pointer :: vel_ex
+    real(mk), dimension(:,:,:,:), pointer :: vel_ex
     ! Stream function values on the grid
-    real(mk), dimension(:,:,:,:,:), pointer :: psi_ex
+    real(mk), dimension(:,:,:,:), pointer :: psi_ex
     
     
     real(mk) :: x,y,z,cx,cy,cz,c2x,c2y,c2z,sx,sy,sz,s2x,s2y,s2z
@@ -54,17 +54,17 @@ contains
              sx=sin(pi*x)
              s2x=sin(2.*pi*x)
              
-             rhs_ex(1,i,j,k,nsublist) = 8.*s2y*pi**2*s2z
-             rhs_ex(2,i,j,k,nsublist) = 8.*s2x*pi**2*s2z
-             rhs_ex(3,i,j,k,nsublist)=  8.*s2x*pi**2*s2y
+             rhs_ex(1,i,j,k) = 8.*s2y*pi**2*s2z
+             rhs_ex(2,i,j,k) = 8.*s2x*pi**2*s2z
+             rhs_ex(3,i,j,k)=  8.*s2x*pi**2*s2y
              
-             vel_ex(1,i,j,k,nsublist) = 2.*s2x*pi*(c2y - c2z)
-             vel_ex(2,i,j,k,nsublist) = 2.*s2y*pi*(c2z - c2x)
-             vel_ex(3,i,j,k,nsublist) = 2.*s2z*pi*(c2x - c2y)
+             vel_ex(1,i,j,k) = 2.*s2x*pi*(c2y - c2z)
+             vel_ex(2,i,j,k) = 2.*s2y*pi*(c2z - c2x)
+             vel_ex(3,i,j,k) = 2.*s2z*pi*(c2x - c2y)
              
-             psi_ex(1,i,j,k,nsublist) = sy*sz
-             psi_ex(2,i,j,k,nsublist) = sx*sz
-             psi_ex(3,i,j,k,nsublist) = sx*sy
+             psi_ex(1,i,j,k) = sy*sz
+             psi_ex(2,i,j,k) = sx*sz
+             psi_ex(3,i,j,k) = sx*sy
              
           end do
        end do
@@ -76,7 +76,7 @@ contains
   subroutine init_vorticity(vorticity,resolution,step,coordMin,lower,upper)
 
     !> vorticity field
-    real(mk), dimension(:,:,:,:,:), pointer :: vorticity 
+    real(mk), dimension(:,:,:,:), pointer :: vorticity 
     !> the local mesh resolution
     integer,dimension(dime),intent(in) :: resolution
     !> size of mesh step in each dir
@@ -86,26 +86,27 @@ contains
     !> boundaries of the domain
     real(mk),dimension(dime),intent(in):: upper,lower
     integer :: i,j,k
-    real(mk) :: pi,x,y,z,physicalDomainSize_Z
+    real(mk) :: x,z,physicalDomainSize_Z
     real(mk) :: coef
 
-    physicalDomainSize_Z = (upper(3)- lower(3))/2.
-    pi = 4.0*atan(1.0_mk)
+    physicalDomainSize_Z = (upper(c_Z)- lower(c_Z))/2.
     vorticity = 0.0
     coef = -3./(physicalDomainSize_Z)**2
     
     do k=1,resolution(c_Z)
        z = coordMin(c_Z) + (k-1)*step(c_Z)
        do j=1,resolution(c_Y)
-          y = coordMin(c_Y) + (j-1)*step(c_Y)
+          !y = coordMin(c_Y) + (j-1)*step(c_Y)
           do i=1,resolution(c_X)
              x = coordMin(c_X) + (i-1)*step(c_X)
-             if( (z < upper(c_Z)).and.(z>lower(c_Z))) then
-                vorticity(c_Y,i,j,k,nsublist) = coef*z
-             endif
+             !if( (z < upper(c_Z)).and.(z>lower(c_Z))) then
+                vorticity(c_Y,i,j,k) = cos(2.*pi*z)!)coef*z
+             !endif
           end do
        end do
     end do
+    
+    
   end subroutine init_vorticity
   
   !> Computes the analytical values for stream function, velocity and vorticity such that
@@ -120,7 +121,7 @@ contains
     !> Coordinates of the minimal point of the local domain
     real(mk),dimension(dime),intent(in) :: coordMin
     ! rhs function values on the grid (i.e. -omega_ex)
-    real(mk), dimension(:,:,:,:,:), pointer :: rhs_ex,velocity,vorticity
+    real(mk), dimension(:,:,:,:), pointer :: rhs_ex,velocity,vorticity
     !> viscosity
     real(mk),intent(in) :: nu
 
@@ -146,17 +147,17 @@ contains
              sx=sin(pi*x)
              s2x=sin(2.*pi*x)
              
-             rhs_ex(1,i,j,k,nsublist) = 32.*s2y*pi**4*s2z*(-2.*nu+c2x*c2y-c2x*c2z)
-             rhs_ex(2,i,j,k,nsublist) = -32.*s2x*pi**4*s2z*(2.*nu-c2y*c2z+c2x*c2y)
-             rhs_ex(3,i,j,k,nsublist) = 32.*s2x*pi**4*s2y*(-2.*nu+c2x*c2z-c2z*c2y)
+             rhs_ex(1,i,j,k) = 32.*s2y*pi**4*s2z*(-2.*nu+c2x*c2y-c2x*c2z)
+             rhs_ex(2,i,j,k) = -32.*s2x*pi**4*s2z*(2.*nu-c2y*c2z+c2x*c2y)
+             rhs_ex(3,i,j,k) = 32.*s2x*pi**4*s2y*(-2.*nu+c2x*c2z-c2z*c2y)
 
-             vorticity(1,i,j,k,nsublist) = 8.*s2y*pi**2*s2z
-             vorticity(2,i,j,k,nsublist) = 8.*s2x*pi**2*s2z
-             vorticity(3,i,j,k,nsublist)=  8.*s2x*pi**2*s2y
+             vorticity(1,i,j,k) = 8.*s2y*pi**2*s2z
+             vorticity(2,i,j,k) = 8.*s2x*pi**2*s2z
+             vorticity(3,i,j,k)=  8.*s2x*pi**2*s2y
              
-             velocity(1,i,j,k,nsublist) = 2.*s2x*pi*(c2y - c2z)
-             velocity(2,i,j,k,nsublist) = 2.*s2y*pi*(c2z - c2x)
-             velocity(3,i,j,k,nsublist) = 2.*s2z*pi*(c2x - c2y)
+             velocity(1,i,j,k) = 2.*s2x*pi*(c2y - c2z)
+             velocity(2,i,j,k) = 2.*s2y*pi*(c2z - c2x)
+             velocity(3,i,j,k) = 2.*s2z*pi*(c2x - c2y)
              
              
           end do
@@ -169,7 +170,7 @@ contains
   subroutine test_particles(vorticity,velocity,resolution,step,coordMin)
     
     !> vorticity field
-    real(mk), dimension(:,:,:,:,:), pointer :: vorticity ,velocity
+    real(mk), dimension(:,:,:,:), pointer :: vorticity ,velocity
     !> the local resolution
     integer, dimension(dime),intent(in) :: resolution
     !> size of mesh step in each dir
@@ -191,8 +192,8 @@ contains
              x = coordMin(c_X) + (i-1)*step(c_X)
              if( (x .eq.10*step(c_X)).and.(y.eq.0).and.(z.eq.xref)) then
                 do l=1,np
-                   vorticity(3,i,j,k+l-1,nsublist) =  0.1;
-                   velocity(3,i,j,k+l-1,nsublist) = 2.0
+                   vorticity(3,i,j,k+l-1) =  0.1;
+                   velocity(3,i,j,k+l-1) = 2.0
                 enddo
              endif
           end do
@@ -206,7 +207,7 @@ contains
   !> computes \f$  \omega(x,y,z) =  \left[\begin{array}{c} cos(x) \\ cos(x) \\ cos(x) \end{array}\right]\f$
   subroutine test_vorticity(vorticity,resolution,step,coordMin,lower,upper)
     ! vorticity field
-    real(mk), dimension(:,:,:,:,:), pointer :: vorticity 
+    real(mk), dimension(:,:,:,:), pointer :: vorticity 
     !> the local resolution
     integer, dimension(dime),intent(in) :: resolution
     !> size of mesh step in each dir
@@ -230,10 +231,153 @@ contains
           y = coordMin(c_Y) + (j-1)*step(c_Y)
           do i=1,resolution(c_X)
              x = coordMin(c_X) + (i-1)*step(c_X)
-             vorticity(:,i,j,k,nsublist) = cos(x)
+             vorticity(:,i,j,k) = cos(x)
           end do
        end do
     end do
   end subroutine test_vorticity
 
+
+  !> Compute flow through x = xmin surface, for the following stream function :
+  !> \f$  \psi(x,y,z) =  \left[\begin{array}{c} 0 \\-U_{inf}z(1 - \frac{R^2}{z^2+x^2})  \\ 0 \end{array}\right] \f$
+  !> i.e.
+  !> \f[ flowRate_{theo} = \left[ -U_{inf}length_y(z - \frac{R^2z}{z^2+x^2})\right]_{lower_z}^{upper_z}\f]
+  function requiredFlowRate3D(radius,length,lower,upper,uinf)
+
+    !> sphere radius
+    real(mk),intent(in) :: radius
+    !> domain dimensions
+    real(mk),dimension(dime),intent(in) :: length
+    !> physical domain lower point
+    real(mk),dimension(dime),intent(in)::lower
+    !> physical domain upper point
+    real(mk),dimension(dime),intent(in)::upper
+    !> velocity inf
+    real(mk),intent(in) :: uinf
+    !> required flow rate
+    real(mk) :: requiredFlowRate3D
+    
+    real(mk) :: dom
+    ! position of the surface for flow rate computation
+    real(mk)::xPos
+
+    xPos = lower(c_X)
+    dom = upper(c_Z)**2+xPos**2
+    if(abs(dom) < epsilon(dom)) then ! if dom == 0
+       requiredFlowRate3D = upper(c_Z)
+    else
+       requiredFlowRate3D = upper(c_Z)*(1.-radius**2/dom)
+    end if
+    dom = lower(c_Z)**2+xPos**2
+    if(abs(dom) < epsilon(dom)) then
+       requiredFlowRate3D = requiredFlowRate3D - lower(c_Z)
+    else
+       requiredFlowRate3D = requiredFlowRate3D - lower(c_Z)*(1. - radius**2/dom)
+    end if
+    requiredFlowRate3D = requiredFlowRate3D * uinf *length(c_Y)
+    return
+  end function requiredFlowRate3D
+
+    !> Compute flow through x = xmin surface, for the following stream function :
+  !> \f$  \psi(x,y,z) =  \left[\begin{array}{c} 0 \\-U_{inf}z(1 - \frac{R^2}{z^2+x^2})  \\ 0 \end{array}\right] \f$
+  !> i.e.
+  !> \f[ flowRate_{theo} = \left[ -U_{inf}length_y(z - \frac{R^2z}{z^2+x^2})\right]_{lower_z}^{upper_z}\f]
+  function requiredFlowRate2D(radius,length,lower,upper,uinf)
+
+    !> sphere radius
+    real(mk),intent(in) :: radius
+    !> domain dimensions
+    real(mk),dimension(dime),intent(in) :: length
+    !> physical domain lower point
+    real(mk),dimension(dime),intent(in)::lower
+    !> physical domain upper point
+    real(mk),dimension(dime),intent(in)::upper
+    !> velocity inf
+    real(mk),intent(in) :: uinf
+    !> required flow rate
+    real(mk) :: requiredFlowRate2D
+    
+    real(mk) :: dom
+    ! position of the surface for flow rate computation
+    real(mk)::xPos
+
+    xPos = lower(c_X)
+    dom = upper(c_Y)**2+xPos**2
+    if(abs(dom) < epsilon(dom)) then ! if dom == 0
+       requiredFlowRate2D = upper(c_Y)
+    else
+       requiredFlowRate2D = upper(c_Y)*(1.-radius**2/dom)
+    end if
+    dom = lower(c_Y)**2+xPos**2
+    if(abs(dom) < epsilon(dom)) then
+       requiredFlowRate2D = requiredFlowRate2D - lower(c_Y)
+    else
+       requiredFlowRate2D = requiredFlowRate2D - lower(c_Y)*(1. - radius**2/dom)
+    end if
+    requiredFlowRate2D = requiredFlowRate2D * uinf
+    return
+  end function requiredFlowRate2D
+
+
+  subroutine Gaussian2D(field,resolution,step,coordMin,center)
+    
+    !> Field initialized with a Gaussian
+    real(mk), dimension(:,:,:), pointer :: field
+    !> Space step
+    real(mk), dimension(dime) :: step
+    !> local resolution
+    integer, dimension(dime) :: resolution
+    !> Coordinates of the lowest point in the current subdomain
+    real(mk),dimension(dime),intent(in) :: coordMin
+    !> Coordinates of the center of the Gaussian
+    real(mk), dimension(dime),intent(in) :: center
+           
+    real(mk), parameter :: sigma = 0.2
+    real(mk) :: expo
+    real(mk),dimension(dime) :: coord
+    integer :: i,j
+
+    field = 0.0
+    do j = 1,resolution(c_Y)
+       coord(c_Y) = coordMin(c_Y)+(j-1)*step(c_Y)-center(c_Y)
+       do i = 1, resolution(c_X)
+          coord(c_X) = coordMin(c_X) + (i-1)*step(c_X)-center(c_X)
+          expo=dot_product(coord,coord)*0.5/sigma**2
+          field(i,j,:) = exp(-expo)
+       end do
+    end do
+
+  end subroutine Gaussian2D
+
+  subroutine Gaussian1D(field,resolution,step,coordMin,dir,shift)
+    
+    !> Field initialized with a Gaussian
+    real(mk), dimension(:,:,:), pointer :: field
+    !> Space step
+    real(mk), dimension(dime) :: step
+    !> local resolution
+    integer, dimension(dime) :: resolution
+    !> Coordinates of the lowest point in the current subdomain
+    real(mk),dimension(dime),intent(in) :: coordMin
+    !> Shift of the Gaussian center along direction dir
+    real(mk), intent(in) :: shift
+    !> Advection direction
+    integer, intent(in) :: dir
+
+    real(mk), parameter :: sigma = 0.2
+    real(mk) :: coord,coeff
+    integer :: i
+
+    !coord(c_Y) = coordMin(c_Y) + (j-1)*step(c_Y)
+    coeff = 1./(sigma*sqrt(2.*pi))
+    print *,"shift",shift
+    field = 0.
+    do i = 1, resolution(dir)
+       coord = coordMin(dir) + (i-1)*step(dir)-shift
+       field(i,:,1) = coeff*exp(-0.5*(coord/sigma)**2.)
+    end do
+
+  end subroutine Gaussian1D
+  
+
 end module testsFunctions
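
A worked instance of the flow-rate computation in requiredFlowRate3D, matching the code
above (example values, with xPos = lower(c_X) = 0 so that z^2 + x^2 reduces to z^2):

    requiredFlowRate3D = uinf*length_y*( upper_z*(1 - R^2/upper_z^2) - lower_z*(1 - R^2/lower_z^2) )

    e.g. R = 0.5, lower_z = -2, upper_z = 2, uinf = 1, length_y = 1:
    requiredFlowRate3D = (2 - 0.125) - (-2 + 0.125) = 3.75
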
diff --git a/HySoP/src/main/Topology.f90 b/HySoP/src/Topology.f90
similarity index 50%
rename from HySoP/src/main/Topology.f90
rename to HySoP/src/Topology.f90
index 6fde19092..77147a660 100755
--- a/HySoP/src/main/Topology.f90
+++ b/HySoP/src/Topology.f90
@@ -6,15 +6,28 @@ module client_topology
   use ppm_module_mktopo
   use ppm_module_topo_get
   use ppm_module_mesh_define
-  use ppm_module_data, only : ppm_param_assign_internal,ppm_param_decomp_cartesian,ppm_topo
-  
-  use client_data, only: mk,dime
+  use ppm_module_data, only : ppm_param_assign_internal,ppm_param_decomp_cartesian,ppm_topo,&
+       ppm_param_decomp_xpencil,ppm_param_decomp_ypencil,ppm_param_decomp_zpencil
+
+  use client_data, only : mk,dime,dim3
   use parmesTools
 
   implicit none
 
+  !> A pointer to a ppm topology
+  type :: topoPtr
+     !> pointer to ppm topo
+     type(ppm_t_topo), pointer :: ptr
+     !> id of the local mesh on the current topo (Warning : we consider only 1 mesh/topo, at the present time)
+     integer :: meshid
+  end type topoPtr
+
   !> A pointer to ppm topology object 
   type(ppm_t_topo), pointer :: topo => null()
+  !> Array of 1D topologies for FFTW solvers
+  type(topoPtr), dimension(dime) :: topo1D
+  !> 1D topology for FFTW solvers
+  type(ppm_t_topo), pointer :: topoY => NULL()
   !> number of sub-domains on the current (mpi)proc. This is for ppm and should be 1 in our case. 
   integer :: nsublist
   !> number of the current mpi proc according to ppm. May be different from rank and is required.
@@ -24,12 +37,12 @@ module client_topology
 
   private
 
-  public init_topo, topo, ppm_t_topo, getPPMLocalResolution,nsublist,isubl
+  public PPMinitTopo, topo, meshNum, ppm_t_topo, getPPMLocalResolution,nsublist,isubl,createTopologyY,topoY
 
 contains
 
   !> Create the topology - Based on ppm_topo_mkfield routine (interface = ppm_mktopo)
-  subroutine init_topo(minPos,maxPos,bc,ghostsize,resolution)
+  subroutine PPMinitTopo(minPos,maxPos,bc,ghostsize,resolution)
 
     real(mk), dimension(:),pointer :: minpos,maxpos
     integer,  dimension(:),pointer :: bc
@@ -52,8 +65,9 @@ contains
     ! topology
     ! Note FP: ghostsize are never used in mktopo for decomp_cartesian.
     ! Note2 FP: struct topo as input results in failure. 
-    call ppm_mktopo(topoid,meshid, false_xp, false_np, decomposition, assigning, minPos, maxPos, bc, ghostsize, subcost, &
-         resolution, info)
+
+    call ppm_mktopo(topoid,meshid,false_xp,false_np,decomposition,assigning,minPos(1:dime),maxPos(1:dime),bc, & 
+         ghostsize,subcost,resolution(1:dime),info)
     !decomposition = ppm_param_decomp_xy_slab
     !ghostsize =0.0
     !CALL ppm_mktopo(topoid,meshid,false_xp,0,decomposition,assigning,minPos,maxPos,bc,&
@@ -86,6 +100,8 @@ contains
     call parmesAssert(meshNum,1,&
          'it seems that several meshes are defined on the current sub-domain, which may result in simulation failure.')
 
+    print *, rank, "local mesh nnodes, shape:", shape(topo%mesh(meshNum)%nnodes), "values:", topo%mesh(meshNum)%nnodes
+    
 !!$    call ppm_topo_get_meshinfo(topoid,meshid,nm,istart,ndata,maxndata,isublist,nsublist,info)
 !!$    
 !!$    print *, 'nm', nm
@@ -111,14 +127,99 @@ contains
 !!$    print *, '======================= [', rank,'] ', shape(topo%mesh(1)%istart)
 !!$    print *, '======================= [', rank,'] ', topo%mesh(1)%Nm
 
-  end subroutine init_topo
+  end subroutine PPMinitTopo
 
   !> Return the local (mpi) subdomain resolution
-  function getPPMLocalResolution()
-    
-    integer,dimension(dime) :: getPPMLocalResolution
-    getPPMLocalResolution = maxval(topo%mesh(meshNum)%nnodes(:,topo%isublist(1:topo%nsublist)),2) 
+  function getPPMLocalResolution(localTopo,meshid)
     
+    !> topology of interest
+    type(ppm_t_topo), pointer :: localTopo
+    !> id of the mesh from which we need to get the resolution
+    integer, intent(in) :: meshid
+    integer,dimension(dim3) :: getPPMLocalResolution
+
+    getPPMLocalResolution = 1
+    getPPMLocalResolution(1:dime) = maxval(localTopo%mesh(meshid)%nnodes(:,localTopo%isublist(1:localTopo%nsublist)),2) 
+
   end function getPPMLocalResolution
 
+  !> 1D topologies creation, for fftw solvers
+  !! 
+  subroutine create1DTopologies(minPos,maxPos,bc,resolution)
+    
+    real(mk), dimension(:),pointer :: minpos,maxpos
+    integer,  dimension(:),pointer :: bc
+    integer,  dimension(:),pointer :: resolution
+    
+    ! Unused parameter that must be present as an input arg ...
+    real(mk), dimension(:,:),pointer :: false_xp => NULL()
+    real(mk), dimension(:),pointer :: subcost => NULL()
+    integer :: false_np
+    integer :: assigning
+    integer, dimension(dim3) :: decomposition
+    integer :: info, meshid, topoid,i
+    integer,dimension(dim3), parameter :: zeros = (/0,0,0/)
+    
+    info = 0
+    decomposition(c_X) = ppm_param_decomp_xpencil
+    decomposition(c_Y) = ppm_param_decomp_ypencil
+    decomposition(c_Z) = ppm_param_decomp_zpencil
+    assigning     = ppm_param_assign_internal
+    false_np = 0 ! Purely mesh-based decomposition
+    
+    ! No ghosts for these topologies
+
+    ! loop over dimensions
+    do i=1,dime
+       topoid = 0
+       meshid = -1
+       call ppm_mktopo(topoid,meshid,false_xp,0,decomposition(i),assigning,minpos(1:dime),maxpos(1:dime),bc,&
+            zeros(1:dime),subcost,resolution(1:dime),info)
+       topo1D(i)%ptr=>ppm_topo(topoid)%t
+       topo1D(i)%meshid = meshid
+    end do
+
+    print *,"iijiji", topo1D(c_X)%ptr%mesh(meshid)%nnodes
+    
+  end subroutine create1DTopologies
+
+  !> Creation of a single pencil topology, for fftw solvers
+  !! 
+  subroutine createTopologyY(topoid,meshid,minPos,maxPos,bc,resolution)
+    
+    real(mk), dimension(:),pointer :: minpos,maxpos
+    integer,  dimension(:),pointer :: bc
+    integer,  dimension(:),pointer :: resolution
+    integer, intent(inout) :: topoid
+    integer, intent(inout) :: meshid
+
+    ! Unused parameter that must be present as an input arg ...
+    real(mk), dimension(:,:),pointer :: false_xp => NULL()
+    real(mk), dimension(:),pointer :: subcost => NULL()
+    integer :: false_np
+    integer :: assigning
+    integer  :: decomposition
+    integer :: info
+    integer,dimension(dime), parameter :: zeros = (/0,0/)
+    
+    info = 0
+
+    decomposition = ppm_param_decomp_xpencil
+    assigning     = ppm_param_assign_internal
+    false_np = 0 ! Purely mesh-based decomposition
+    
+    ! No ghosts for these topologies
+    ! loop over dimensions
+    topoid = 0
+    meshid = -1
+    call ppm_mktopo(topoid,meshid,false_xp,0,decomposition,assigning,minpos(1:dime),maxpos(1:dime),bc,&
+         zeros(1:dime),subcost,resolution(1:dime),info)
+    topoY=>ppm_topo(topoid)%t
+    
+    print *,rank,"topoY local mesh nnodes:", topoY%mesh(meshid)%nnodes
+    print *,rank,"topoY local resolution:", getPPMLocalResolution(topoY,meshid)
+    
+  end subroutine createTopologyY
+
+
 end module client_topology
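
The reduction in getPPMLocalResolution relies on maxval along the sub-domain dimension.
A tiny standalone illustration with hypothetical node counts (two local sub-domains):

    program check_maxval_dim2
      implicit none
      ! nnodes(1:dime, 1:nsubs) mimics topo%mesh(meshid)%nnodes restricted to the local subs
      integer, dimension(3,2) :: nnodes
      nnodes(:,1) = (/65, 33, 1/)   ! hypothetical sub-domain 1
      nnodes(:,2) = (/65, 32, 1/)   ! hypothetical sub-domain 2
      write(*,*) maxval(nnodes, 2)  ! prints: 65 33 1
    end program check_maxval_dim2
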
diff --git a/HySoP/src/Unstable/LEGI/CMakeLists.txt b/HySoP/src/Unstable/LEGI/CMakeLists.txt
deleted file mode 100644
index 302d6cd4a..000000000
--- a/HySoP/src/Unstable/LEGI/CMakeLists.txt
+++ /dev/null
@@ -1,207 +0,0 @@
-#=======================================================
-# cmake utility to compile and install JB library
-#
-# F. Pérignon, june 2012
-#
-#=======================================================
-
-# ============= Global cmake Settings ============= 
-# Set minimum version for cmake
-cmake_minimum_required(VERSION 2.8.7)
-# Set cmake modules directory (i.e. the one which contains all user-defined FindXXX.cmake files among other things)
-set(CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/CMake)
-# Force out-of-source build
-include(OutOfSourceBuild)
-# Some usefull macros
-include(MyTools)
-
-# User defined options
-option(VERBOSE_MODE "enable verbose mode for cmake exec. Default = on" ON)
-option(USE_MPI "compile and link parmes with mpi when this mode is enable. Default = on." ON)
-option(DOUBLEPREC "set precision for real numbers to double precision when this mode is enable. Default = on." ON)
-option(WITH_TESTS "Enable testing. Default = on" OFF)
-option(BUILD_SHARED_LIBS "Enable dynamic library build, default = ON" ON)
-
-# cmake project name
-set(PROJECT_NAME parmeslegi)
-# --- Name for the package ---
-# This name will be used to install parmeslegi (library, headers, ...) and when another lib or soft will need to search for parmeslegi.
-set(PACKAGE_NAME "parmeslegi")
-# --- Set a version number for the package ---
-set(${PACKAGE_NAME}_version 1.0.0)
-# --- The name (without extension) of the lib to be created ---
-set(PROJECT_LIBRARY_NAME ${PROJECT_NAME})
-# --- The name of the exe to be created (test purpose)
-# This exe will be linked with libPROJECT_LIBRARY_NAME 
-set(EXE_NAME ${PROJECT_NAME}Run)
-# The list of all dirs containing sources to be compiled for the parmeslegi lib
-# Any file in those dirs will be used to create libparmes
-set(${PROJECT_LIBRARY_NAME}_SRCDIRS 
-  src
-  src/Layout
-  src/particles
-  src/output
-  )
-# A main file to create an executable (test purpose)
-# Any files in these dirs will be used to create parmeslegi exec (linked with libparmes)
-set(${EXE_NAME}_SRCDIRS test/ test/src/ test/src/Test_advec
-  test/src/Test_io test/src/Test_topo)
-# Matching expr for files to be compiled. 
-set(EXTS *.f90 *.F90)
-# Matching expr for headers (install purpose)
-set(EXTS_HDRS *.hpp *.h)
-#
-# ============= The project =============
-# Set project name and project languages 
-# => this automatically defines:
-#   - ${PROJECT_NAME}_BINARY_DIR : where you have run cmake, i.e. the place for compilation
-#   - ${PROJECT_NAME}_SOURCE_DIR : where sources (.f and .h and this CMakeLists.txt) are located
-# Note that because of OutOfSourceBuild, binary_dir and source_dir must be different. 
-
-project(${PROJECT_NAME} Fortran)
-
-# ============= Tests =============
-if(WITH_TESTS)
-  enable_testing()
-endif(WITH_TESTS)
-
-# ============= Search for libraries  =============
-# We search for libraries parmeslegi depends on and 
-# set the compile/link conf (-I and -L opt)
-
-# --- MPI ---
-if(USE_MPI)
-  # Find MPI for C++ and fortran.
-  find_package(MPI REQUIRED)
-  # -I
-  include_directories(${MPI_Fortran_INCLUDE_PATH})
-  # Add compilation flags
-  append_Fortran_flags(${MPI_Fortran_COMPILE_FLAGS})
-  set(${PROJECT_NAME}_LINK_FLAGS ${${PROJECT_NAME}_LINK_FLAGS} ${MPI_Fortran_LINK_FLAGS})
-  #endif(MPI_Fortran_COMPILER)
-  set(LIBS ${LIBS} ${MPI_Fortran_LIBRARIES} )
-endif()
-
-# ============= Prepare compilation =============
-
-# Force a default build type if not provided by user
-# CMAKE_BUILD_TYPE = empty, Debug, Release, RelWithDebInfo or MinSizeRel.
-if (NOT CMAKE_BUILD_TYPE)
-  set (CMAKE_BUILD_TYPE RELEASE CACHE STRING "Choose the type of build, options are: None, Debug, Release, RelWithDebInfo or MinSizeRel." FORCE)  
-endif (NOT CMAKE_BUILD_TYPE)
-
-# Set module files directory (i.e. where .mod will be created) 
-set(CMAKE_Fortran_MODULE_DIRECTORY ${CMAKE_BINARY_DIR}/Modules)
-#  Add compilation flags:
-append_Fortran_FLAGS("-Wall")
-
-# ============= Source and header files list =============
-# We scan all files with matching extension in directories 
-# containing sources.
-
-# Source and header files list:
-foreach(_DIR ${${PROJECT_LIBRARY_NAME}_SRCDIRS})
-  set(_DIR_FILES)
-  foreach(_EXT ${EXTS}) # Source files
-    file(GLOB _DIR_FILES_EXT ${_DIR}/${_EXT})
-    if(_DIR_FILES_EXT)
-      list(APPEND ${PROJECT_LIBRARY_NAME}_SRC ${_DIR_FILES_EXT})
-    endif()
-  endforeach()
-  foreach(_EXT ${EXTS_HDRS}) # Headers
-    file(GLOB _DIR_FILES_EXT ${_DIR}/${_EXT})
-    if(_DIR_FILES_EXT)
-      list(APPEND ${PROJECT_LIBRARY_NAME}_HDRS ${_DIR_FILES_EXT})
-    endif()
-  endforeach()
-endforeach()
-# We add headers to source files
-list(APPEND ${PROJECT_LIBRARY_NAME}_SRC ${${PROJECT_LIBRARY_NAME}_HDRS})
-
-# The same for main dir ...
-foreach(_DIR ${${EXE_NAME}_SRCDIRS})
-  set(_DIR_FILES)
-  foreach(_EXT ${EXTS})
-    file(GLOB _DIR_FILES_EXT ${_DIR}/${_EXT})
-    if(_DIR_FILES_EXT)
-      list(APPEND ${EXE_NAME}_SRC ${_DIR_FILES_EXT})
-    endif()
-  endforeach()
-  foreach(_EXT ${EXTS_HDRS}) 
-    file(GLOB _DIR_FILES_EXT ${_DIR}/${_EXT})
-    if(_DIR_FILES_EXT)
-      list(APPEND ${EXE_NAME}_HDRS ${_DIR_FILES_EXT})
-    endif()
-  endforeach()
-endforeach()
-list(APPEND ${EXE_NAME}_SRC ${${EXE_NAME}_HDRS})
-
-# Add directories to those searched by compiler ...
-# -I
-include_directories(${${PROJECT_LIBRARY_NAME}_SRCDIRS})
-include_directories(${${EXE_NAME}_HDRS})
-include_directories(${CMAKE_Fortran_MODULE_DIRECTORY})
-
-# ============= Creates the library =============
-if(BUILD_SHARED_LIBS) # shared library
-  add_library(${PROJECT_LIBRARY_NAME} SHARED ${${PROJECT_LIBRARY_NAME}_SRC})
-else() # static library
-  add_library(${PROJECT_LIBRARY_NAME} STATIC ${${PROJECT_LIBRARY_NAME}_SRC})
-endif()
-# Libs to link with PROJECT__LIBRARY_NAME
-target_link_libraries(${PROJECT_LIBRARY_NAME} ${LIBS})
-
-# ============= Creates the executable =============
-add_executable(${EXE_NAME} ${${EXE_NAME}_SRC})
-add_dependencies(${EXE_NAME} ${PROJECT_LIBRARY_NAME})
-
-# libs to link with EXE_NAME
-target_link_libraries(${EXE_NAME} ${PROJECT_LIBRARY_NAME})
-target_link_libraries(${EXE_NAME} ${LIBS})
-
-# ============== Add tests ==============
-if(WITH_TESTS)
-  message(STATUS "Enable testing ...")
-  begin_test(src/tests/F2003)
-  new_test(testAllocatedPtr userMod.f90 wrapper.f90 testAllocatedPtr.cxx)
-  new_test(testNullPtr userMod.f90 wrapper.f90 testNullPtr.cxx)
-  end_test()
-endif(WITH_TESTS)
-
-# ============= Prepare install =============
-
-# The library
-# The library, the headers and mod files, the cmake generated files
-# will be install in CMAKE_INSTALL_PREFIX/lib include and share
-#include(InstallPackage)
-
-#install_package(${PACKAGE_NAME} ${PROJECT_LIBRARY_NAME} ${${PROJECT_NAME}_HDRS})
-
-#install(TARGETS ${EXE_NAME}  
-#RUNTIME DESTINATION bin  # executables
-#  )
-
-# ============= RPATH =============
-# Concerning rpath see for example http://www.itk.org/Wiki/CMake_RPATH_handling
-
-# --------------------------------------------
-# do not skip the full RPATH for the build tree
-set(CMAKE_SKIP_BUILD_RPATH  FALSE)
-# when building, don't use the install RPATH already
-# (but later on when installing)
-set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE) 
-# the RPATH to be used when installing
-set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib")
-# add the automatically determined parts of the RPATH
-# which point to directories outside the build tree to the install RPATH
-set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
-
-# ============= Summary =============
-if(VERBOSE_MODE)
-  message(STATUS "====================== Summary ======================")
-  message(STATUS " Compiler : ${CMAKE_Fortran_COMPILER}")
-  message(STATUS " Sources are in : ${CMAKE_SOURCE_DIR}")
-  message(STATUS " Project uses MPI : ${USE_MPI}")
-  message(STATUS " Project will be installed in ${CMAKE_INSTALL_PREFIX}")
-  message(STATUS "====================== ======= ======================")
-endif()
diff --git a/HySoP/src/main/VectorCalc.f90 b/HySoP/src/VectorCalc.f90
similarity index 53%
rename from HySoP/src/main/VectorCalc.f90
rename to HySoP/src/VectorCalc.f90
index 034df64ea..dd6691c15 100755
--- a/HySoP/src/main/VectorCalc.f90
+++ b/HySoP/src/VectorCalc.f90
@@ -3,55 +3,58 @@ module vectorcalculus
 
   use client_data
   use client_topology, only:nsublist
+  use mpi
   implicit none
 
 contains
   
   !> compute \f[ fieldout = \nabla \times fieldin \f] using 4th order finite differences
-  subroutine curldf4(fieldin,fieldout,resolution,step)
+  subroutine curlDF4(fieldin,fieldout,resolution,step)
     !> input field
     real(mk), dimension(:,:,:,:,:), pointer :: fieldin
     !> output field 
     real(mk), dimension(:,:,:,:,:), pointer :: fieldout
     !> the local resolution
-    integer, dimension(dime),intent(in) :: resolution
+    integer, dimension(dim3),intent(in) :: resolution
     !> size of mesh step in each dir
-    real(mk), dimension(dime),intent(in) :: step
+    real(mk), dimension(dim3),intent(in) :: step
  
     real(mk) :: facx, facy, facz
     integer :: i,j,k
 
-
-    facx = 1.0_mk/(12.0_mk*step(c_X))
-    facy = 1.0_mk/(12.0_mk*step(c_Y))
-    facz = 1.0_mk/(12.0_mk*step(c_Z))
-
+    fieldout = 0.0
+    
+    facx = 1.0/(12.0*step(c_X))
+    facy = 1.0/(12.0*step(c_Y))
+    facz = 1.0/(12.0*step(c_Z))
+    
     do k=1,resolution(c_Z)
        do j=1,resolution(c_Y)
           do i=1,resolution(c_X)
              fieldout(c_X,i,j,k,nsublist) = -facz*(&
-                  -fieldin(c_Y,i,j,k+2,nsublist)+8.0_mk*fieldin(c_Y,i,j,k+1,nsublist)-8.0_mk&
+                  -fieldin(c_Y,i,j,k+2,nsublist)+8.0*fieldin(c_Y,i,j,k+1,nsublist)-8.0&
                   *fieldin(c_Y,i,j,k-1,nsublist)+fieldin(c_Y,i,j,k-2,nsublist)&
                   )  + facy*(&
-                  -fieldin(c_Z,i,j+2,k,nsublist)+8.0_mk*fieldin(c_Z,i,j+1,k,nsublist)-8.0_mk&
+                  -fieldin(c_Z,i,j+2,k,nsublist)+8.0*fieldin(c_Z,i,j+1,k,nsublist)-8.0&
                   *fieldin(c_Z,i,j-1,k,nsublist)+fieldin(c_Z,i,j-2,k,nsublist)&
                   )
              fieldout(c_Y,i,j,k,nsublist) = -facx*(&
-                  -fieldin(c_Z,i+2,j,k,nsublist)+8.0_mk*fieldin(c_Z,i+1,j,k,nsublist)-8.0_mk&
+                  -fieldin(c_Z,i+2,j,k,nsublist)+8.0*fieldin(c_Z,i+1,j,k,nsublist)-8.0&
                   *fieldin(c_Z,i-1,j,k,nsublist)+fieldin(c_Z,i-2,j,k,nsublist)&
                   ) + facz*( &
-                  -fieldin(c_X,i,j,k+2,nsublist)+8.0_mk*fieldin(c_X,i,j,k+1,nsublist)-8.0_mk&
-                  *fieldin(c_X,i,j,k-1,nsublist)+fieldin(c_X,i,j,k-2,nsublist))
+                  -fieldin(c_X,i,j,k+2,nsublist)+8.0*(fieldin(c_X,i,j,k+1,nsublist)-&
+                  fieldin(c_X,i,j,k-1,nsublist))+fieldin(c_X,i,j,k-2,nsublist))
              fieldout(c_Z,i,j,k,nsublist) = -facy*(&
-                  -fieldin(c_X,i,j+2,k,nsublist)+8.0_mk*fieldin(c_X,i,j+1,k,nsublist)-8.0_mk&
+                  -fieldin(c_X,i,j+2,k,nsublist)+8.0*fieldin(c_X,i,j+1,k,nsublist)-8.0&
                   *fieldin(c_X,i,j-1,k,nsublist)+fieldin(c_X,i,j-2,k,nsublist)&
                   ) +facx*(&
-                  -fieldin(c_Y,i+2,j,k,nsublist)+8.0_mk*fieldin(c_Y,i+1,j,k,nsublist)-8.0_mk&
+                  -fieldin(c_Y,i+2,j,k,nsublist)+8.0*fieldin(c_Y,i+1,j,k,nsublist)-8.0&
                   *fieldin(c_Y,i-1,j,k,nsublist)+fieldin(c_Y,i-2,j,k,nsublist))
           enddo
        enddo
     enddo
-  end subroutine curldf4
+    
+  end subroutine curlDF4
 
   !> Computes stretch and diffusion terms. This is a copy of Adrien's code.
   subroutine computeRHS(velocity,vorticity,rhs,resolution,step,nu)
@@ -63,23 +66,22 @@ contains
     !> rhs, output
     real(mk), dimension(:,:,:,:,:), pointer :: rhs
     !> local mesh resolution
-    integer,dimension(dime),intent(in) :: resolution
+    integer,dimension(dim3),intent(in) :: resolution
     !> mesh step sizes
-    real(mk),  dimension(dime),intent(in) :: step
+    real(mk),  dimension(dim3),intent(in) :: step
     real(mk), intent(in) :: nu
 
     integer :: i,j,k
     real(mk), dimension(dime) :: tx, ty, tz, stretch,diffusion
     real(mk) :: facx,facy,facz, facx2,facy2,facz2
 
-    facx=1._mk/(12._mk*step(c_X))
-    facy=1._mk/(12._mk*step(c_Y))
-    facz=1._mk/(12._mk*step(c_Z))
-    facx2=nu/(12._mk*step(c_X)**2)
-    facy2=nu/(12._mk*step(c_Y)**2)
-    facz2=nu/(12._mk*step(c_Z)**2)
+    facx=1./(12.*step(c_X))
+    facy=1./(12.*step(c_Y))
+    facz=1./(12.*step(c_Z))
+    facx2=nu/(12.*step(c_X)**2)
+    facy2=nu/(12.*step(c_Y)**2)
+    facz2=nu/(12.*step(c_Z)**2)
 
-    print *, rank, 'rhs ...', resolution
     rhs = 0.0
     do k=1,resolution(c_Z)
        do j=1,resolution(c_Y)
@@ -87,56 +89,56 @@ contains
              !stretch
              !------
              tx(c_X)= &
-                  vorticity(c_X,i-2,j,k,nsublist)*velocity(c_X,i-2,j,k,nsublist)  - 8._mk*&
-                  vorticity(c_X,i-1,j,k,nsublist)*velocity(c_X,i-1,j,k,nsublist)  + 8._mk*&
+                  vorticity(c_X,i-2,j,k,nsublist)*velocity(c_X,i-2,j,k,nsublist)  - 8.*&
+                  vorticity(c_X,i-1,j,k,nsublist)*velocity(c_X,i-1,j,k,nsublist)  + 8.*&
                   vorticity(c_X,i+1,j,k,nsublist)*velocity(c_X,i+1,j,k,nsublist)  -  &
                   vorticity(c_X,i+2,j,k,nsublist)*velocity(c_X,i+2,j,k,nsublist)
 
              tx(c_Y)= &
-                  vorticity(c_X,i-2,j,k,nsublist)*velocity(c_Y,i-2,j,k,nsublist)  - 8._mk*&
-                  vorticity(c_X,i-1,j,k,nsublist)*velocity(c_Y,i-1,j,k,nsublist)  + 8._mk*&
+                  vorticity(c_X,i-2,j,k,nsublist)*velocity(c_Y,i-2,j,k,nsublist)  - 8.*&
+                  vorticity(c_X,i-1,j,k,nsublist)*velocity(c_Y,i-1,j,k,nsublist)  + 8.*&
                   vorticity(c_X,i+1,j,k,nsublist)*velocity(c_Y,i+1,j,k,nsublist)  - &
                   vorticity(c_X,i+2,j,k,nsublist)*velocity(c_Y,i+2,j,k,nsublist)
 
              tx(c_Z)=&
-                  vorticity(c_X,i-2,j,k,nsublist)*velocity(c_Z,i-2,j,k,nsublist)  - 8._mk*&
-                  vorticity(c_X,i-1,j,k,nsublist)*velocity(c_Z,i-1,j,k,nsublist)  + 8._mk*&
+                  vorticity(c_X,i-2,j,k,nsublist)*velocity(c_Z,i-2,j,k,nsublist)  - 8.*&
+                  vorticity(c_X,i-1,j,k,nsublist)*velocity(c_Z,i-1,j,k,nsublist)  + 8.*&
                   vorticity(c_X,i+1,j,k,nsublist)*velocity(c_Z,i+1,j,k,nsublist)  - &
                   vorticity(c_X,i+2,j,k,nsublist)*velocity(c_Z,i+2,j,k,nsublist)
 
              ty(c_X)=&
-                  vorticity(c_Y,i,j-2,k,nsublist)*velocity(c_X,i,j-2,k,nsublist) - 8._mk*&
-                  vorticity(c_Y,i,j-1,k,nsublist)*velocity(c_X,i,j-1,k,nsublist) + 8._mk*&
+                  vorticity(c_Y,i,j-2,k,nsublist)*velocity(c_X,i,j-2,k,nsublist) - 8.*&
+                  vorticity(c_Y,i,j-1,k,nsublist)*velocity(c_X,i,j-1,k,nsublist) + 8.*&
                   vorticity(c_Y,i,j+1,k,nsublist)*velocity(c_X,i,j+1,k,nsublist) - &
                   vorticity(c_Y,i,j+2,k,nsublist)*velocity(c_X,i,j+2,k,nsublist)
 
              ty(c_Y)=&
-                  vorticity(c_Y,i,j-2,k,nsublist)*velocity(c_Y,i,j-2,k,nsublist) - 8._mk*&
-                  vorticity(c_Y,i,j-1,k,nsublist)*velocity(c_Y,i,j-1,k,nsublist) + 8._mk*&
+                  vorticity(c_Y,i,j-2,k,nsublist)*velocity(c_Y,i,j-2,k,nsublist) - 8.*&
+                  vorticity(c_Y,i,j-1,k,nsublist)*velocity(c_Y,i,j-1,k,nsublist) + 8.*&
                   vorticity(c_Y,i,j+1,k,nsublist)*velocity(c_Y,i,j+1,k,nsublist) - &
                   vorticity(c_Y,i,j+2,k,nsublist)*velocity(c_Y,i,j+2,k,nsublist)
 
              ty(c_Z)=&
-                  vorticity(c_Y,i,j-2,k,nsublist)*velocity(c_Z,i,j-2,k,nsublist) - 8._mk*&
-                  vorticity(c_Y,i,j-1,k,nsublist)*velocity(c_Z,i,j-1,k,nsublist) + 8._mk*&
+                  vorticity(c_Y,i,j-2,k,nsublist)*velocity(c_Z,i,j-2,k,nsublist) - 8.*&
+                  vorticity(c_Y,i,j-1,k,nsublist)*velocity(c_Z,i,j-1,k,nsublist) + 8.*&
                   vorticity(c_Y,i,j+1,k,nsublist)*velocity(c_Z,i,j+1,k,nsublist) - &
                   vorticity(c_Y,i,j+2,k,nsublist)*velocity(c_Z,i,j+2,k,nsublist)
 
              tz(c_X)=&
-                  vorticity(c_Z,i,j,k-2,nsublist)*velocity(c_X,i,j,k-2,nsublist) - 8._mk*&
-                  vorticity(c_Z,i,j,k-1,nsublist)*velocity(c_X,i,j,k-1,nsublist) + 8._mk*&
+                  vorticity(c_Z,i,j,k-2,nsublist)*velocity(c_X,i,j,k-2,nsublist) - 8.*&
+                  vorticity(c_Z,i,j,k-1,nsublist)*velocity(c_X,i,j,k-1,nsublist) + 8.*&
                   vorticity(c_Z,i,j,k+1,nsublist)*velocity(c_X,i,j,k+1,nsublist) - &
                   vorticity(c_Z,i,j,k+2,nsublist)*velocity(c_X,i,j,k+2,nsublist)
 
              tz(c_Y)=&
-                  vorticity(c_Z,i,j,k-2,nsublist)*velocity(c_Y,i,j,k-2,nsublist) - 8._mk*&
-                  vorticity(c_Z,i,j,k-1,nsublist)*velocity(c_Y,i,j,k-1,nsublist) + 8._mk*&
+                  vorticity(c_Z,i,j,k-2,nsublist)*velocity(c_Y,i,j,k-2,nsublist) - 8.*&
+                  vorticity(c_Z,i,j,k-1,nsublist)*velocity(c_Y,i,j,k-1,nsublist) + 8.*&
                   vorticity(c_Z,i,j,k+1,nsublist)*velocity(c_Y,i,j,k+1,nsublist) - &
                   vorticity(c_Z,i,j,k+2,nsublist)*velocity(c_Y,i,j,k+2,nsublist)
 
              tz(c_Z)=&
-                  vorticity(c_Z,i,j,k-2,nsublist)*velocity(c_Z,i,j,k-2,nsublist) - 8._mk*&
-                  vorticity(c_Z,i,j,k-1,nsublist)*velocity(c_Z,i,j,k-1,nsublist) + 8._mk*&
+                  vorticity(c_Z,i,j,k-2,nsublist)*velocity(c_Z,i,j,k-2,nsublist) - 8.*&
+                  vorticity(c_Z,i,j,k-1,nsublist)*velocity(c_Z,i,j,k-1,nsublist) + 8.*&
                   vorticity(c_Z,i,j,k+1,nsublist)*velocity(c_Z,i,j,k+1,nsublist) -  &
                   vorticity(c_Z,i,j,k+2,nsublist)*velocity(c_Z,i,j,k+2,nsublist)
 
@@ -145,125 +147,225 @@ contains
              !diffusion
              !----------
              tx(c_X)= - & 
-                  vorticity(c_X,i+2,j,k,nsublist) + 16._mk*&
-                  vorticity(c_X,i+1,j,k,nsublist) - 30._mk*&
-                  vorticity(c_X,i,j,k,nsublist)   + 16._mk*&
+                  vorticity(c_X,i+2,j,k,nsublist) + 16.*&
+                  vorticity(c_X,i+1,j,k,nsublist) - 30.*&
+                  vorticity(c_X,i,j,k,nsublist)   + 16.*&
                   vorticity(c_X,i-1,j,k,nsublist) - &
                   vorticity(c_X,i-2,j,k,nsublist)
 
              tx(c_Y)= - &
-                  vorticity(c_Y,i+2,j,k,nsublist) + 16._mk*&
-                  vorticity(c_Y,i+1,j,k,nsublist) - 30._mk*&
-                  vorticity(c_Y,i,j,k,nsublist)   + 16._mk*&
+                  vorticity(c_Y,i+2,j,k,nsublist) + 16.*&
+                  vorticity(c_Y,i+1,j,k,nsublist) - 30.*&
+                  vorticity(c_Y,i,j,k,nsublist)   + 16.*&
                   vorticity(c_Y,i-1,j,k,nsublist) - &
                   vorticity(c_Y,i-2,j,k,nsublist)
 
              tx(c_Z) = - &
-                  vorticity(c_Z,i+2,j,k,nsublist) + 16._mk*&
-                  vorticity(c_Z,i+1,j,k,nsublist) - 30._mk*&
-                  vorticity(c_Z,i,j,k,nsublist)   + 16._mk*&
+                  vorticity(c_Z,i+2,j,k,nsublist) + 16.*&
+                  vorticity(c_Z,i+1,j,k,nsublist) - 30.*&
+                  vorticity(c_Z,i,j,k,nsublist)   + 16.*&
                   vorticity(c_Z,i-1,j,k,nsublist) - &
                   vorticity(c_Z,i-2,j,k,nsublist)
 
              ty(c_X)= - &
-                  vorticity(c_X,i,j+2,k,nsublist) + 16._mk*&
-                  vorticity(c_X,i,j+1,k,nsublist) - 30._mk*&
-                  vorticity(c_X,i,j,k,nsublist)   + 16._mk*&
+                  vorticity(c_X,i,j+2,k,nsublist) + 16.*&
+                  vorticity(c_X,i,j+1,k,nsublist) - 30.*&
+                  vorticity(c_X,i,j,k,nsublist)   + 16.*&
                   vorticity(c_X,i,j-1,k,nsublist) - &
                   vorticity(c_X,i,j-2,k,nsublist)
 
              ty(c_Y)= - &
-                  vorticity(c_Y,i,j+2,k,nsublist) + 16._mk*&
-                  vorticity(c_Y,i,j+1,k,nsublist) - 30._mk*&
-                  vorticity(c_Y,i,j,k,nsublist)   + 16._mk*&
+                  vorticity(c_Y,i,j+2,k,nsublist) + 16.*&
+                  vorticity(c_Y,i,j+1,k,nsublist) - 30.*&
+                  vorticity(c_Y,i,j,k,nsublist)   + 16.*&
                   vorticity(c_Y,i,j-1,k,nsublist) - &
                   vorticity(c_Y,i,j-2,k,nsublist)
 
              ty(c_Z)= - &
-                  vorticity(c_Z,i,j+2,k,nsublist) + 16._mk*&
-                  vorticity(c_Z,i,j+1,k,nsublist) - 30._mk*&
-                  vorticity(c_Z,i,j,k,nsublist)   + 16._mk*&
+                  vorticity(c_Z,i,j+2,k,nsublist) + 16.*&
+                  vorticity(c_Z,i,j+1,k,nsublist) - 30.*&
+                  vorticity(c_Z,i,j,k,nsublist)   + 16.*&
                   vorticity(c_Z,i,j-1,k,nsublist) - &
                   vorticity(c_Z,i,j-2,k,nsublist)
 
              tz(c_X)= - &
-                  vorticity(c_X,i,j,k+2,nsublist) + 16._mk*&
-                  vorticity(c_X,i,j,k+1,nsublist) - 30._mk*&
-                  vorticity(c_X,i,j,k,nsublist)   + 16._mk*&
+                  vorticity(c_X,i,j,k+2,nsublist) + 16.*&
+                  vorticity(c_X,i,j,k+1,nsublist) - 30.*&
+                  vorticity(c_X,i,j,k,nsublist)   + 16.*&
                   vorticity(c_X,i,j,k-1,nsublist) - &
                   vorticity(c_X,i,j,k-2,nsublist)
 
              tz(c_Y)= - &
-                  vorticity(c_Y,i,j,k+2,nsublist) + 16._mk*&
-                  vorticity(c_Y,i,j,k+1,nsublist) - 30._mk*&
-                  vorticity(c_Y,i,j,k,nsublist)   + 16._mk*&
+                  vorticity(c_Y,i,j,k+2,nsublist) + 16.*&
+                  vorticity(c_Y,i,j,k+1,nsublist) - 30.*&
+                  vorticity(c_Y,i,j,k,nsublist)   + 16.*&
                   vorticity(c_Y,i,j,k-1,nsublist) - &
                   vorticity(c_Y,i,j,k-2,nsublist)
 
              tz(c_Z)=- &
-                  vorticity(c_Z,i,j,k+2,nsublist) + 16._mk*&
-                  vorticity(c_Z,i,j,k+1,nsublist) - 30._mk*&
-                  vorticity(c_Z,i,j,k,nsublist)   + 16._mk*&
+                  vorticity(c_Z,i,j,k+2,nsublist) + 16.*&
+                  vorticity(c_Z,i,j,k+1,nsublist) - 30.*&
+                  vorticity(c_Z,i,j,k,nsublist)   + 16.*&
                   vorticity(c_Z,i,j,k-1,nsublist) - &
                   vorticity(c_Z,i,j,k-2,nsublist)
 
              diffusion = facx2*tx + facy2*ty + facz2*tz
-             rhs(:,i,j,k,nsublist) = stretch! + diffusion
+             rhs(:,i,j,k,nsublist) = stretch + diffusion
 
           end do
        end do
     end do
   end subroutine computeRHS
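For reference, the coefficients used in curlDF4 and computeRHS above are the standard fourth-order centered finite-difference stencils. With h the grid step in the differentiation direction,
\[
f'(x_i) \approx \frac{-f_{i+2} + 8 f_{i+1} - 8 f_{i-1} + f_{i-2}}{12\,h},
\qquad
f''(x_i) \approx \frac{-f_{i+2} + 16 f_{i+1} - 30 f_i + 16 f_{i-1} - f_{i-2}}{12\,h^{2}},
\]
so facx, facy, facz are the 1/(12h) prefactors of the first formula (curl and stretch) and facx2, facy2, facz2 the nu/(12h^2) prefactors of the second (diffusion).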
 
-  subroutine norm1(norm,field,resolution,step)
-    real(mk), dimension(dime), intent(out) :: norm
-    real(mk), dimension(:,:,:,:,:), pointer :: field
-    !> the local resolution
-    integer, dimension(dime),intent(in) :: resolution
-    !> size of mesh step in each dir
-    real(mk), dimension(dime),intent(in) :: step
+  !> Computes the stretch term.
+  subroutine computeStretch(velocity,vorticity,stretch,resolution,step)
 
-    real(mk) :: h3
+    !> Velocity field
+    real(mk), dimension(:,:,:,:,:), pointer :: velocity
+    !> vorticity field
+    real(mk), dimension(:,:,:,:,:), pointer :: vorticity
+    !> stretch term, output
+    real(mk), dimension(:,:,:,:,:), pointer :: stretch
+    !> local mesh resolution
+    integer,dimension(dim3),intent(in) :: resolution
+    !> mesh step sizes
+    real(mk),  dimension(dim3),intent(in) :: step
+
+    integer :: i,j,k
+    real(mk), dimension(dime) :: tx, ty, tz
+    real(mk) :: facx,facy,facz
+
+    facx=1./(12.*step(c_X))
+    facy=1./(12.*step(c_Y))
+    facz=1./(12.*step(c_Z))
+
+    do k=1,resolution(c_Z)
+       do j=1,resolution(c_Y)
+          do i=1,resolution(c_X)
+             !stretch
+             !------
+             tx(c_X)= &
+                  vorticity(c_X,i-2,j,k,nsublist)*velocity(c_X,i-2,j,k,nsublist)  - 8.*&
+                  vorticity(c_X,i-1,j,k,nsublist)*velocity(c_X,i-1,j,k,nsublist)  + 8.*&
+                  vorticity(c_X,i+1,j,k,nsublist)*velocity(c_X,i+1,j,k,nsublist)  -  &
+                  vorticity(c_X,i+2,j,k,nsublist)*velocity(c_X,i+2,j,k,nsublist)
+
+             tx(c_Y)= &
+                  vorticity(c_X,i-2,j,k,nsublist)*velocity(c_Y,i-2,j,k,nsublist)  - 8.*&
+                  vorticity(c_X,i-1,j,k,nsublist)*velocity(c_Y,i-1,j,k,nsublist)  + 8.*&
+                  vorticity(c_X,i+1,j,k,nsublist)*velocity(c_Y,i+1,j,k,nsublist)  - &
+                  vorticity(c_X,i+2,j,k,nsublist)*velocity(c_Y,i+2,j,k,nsublist)
+
+             tx(c_Z)=&
+                  vorticity(c_X,i-2,j,k,nsublist)*velocity(c_Z,i-2,j,k,nsublist)  - 8.*&
+                  vorticity(c_X,i-1,j,k,nsublist)*velocity(c_Z,i-1,j,k,nsublist)  + 8.*&
+                  vorticity(c_X,i+1,j,k,nsublist)*velocity(c_Z,i+1,j,k,nsublist)  - &
+                  vorticity(c_X,i+2,j,k,nsublist)*velocity(c_Z,i+2,j,k,nsublist)
+
+             ty(c_X)=&
+                  vorticity(c_Y,i,j-2,k,nsublist)*velocity(c_X,i,j-2,k,nsublist) - 8.*&
+                  vorticity(c_Y,i,j-1,k,nsublist)*velocity(c_X,i,j-1,k,nsublist) + 8.*&
+                  vorticity(c_Y,i,j+1,k,nsublist)*velocity(c_X,i,j+1,k,nsublist) - &
+                  vorticity(c_Y,i,j+2,k,nsublist)*velocity(c_X,i,j+2,k,nsublist)
+
+             ty(c_Y)=&
+                  vorticity(c_Y,i,j-2,k,nsublist)*velocity(c_Y,i,j-2,k,nsublist) - 8.*&
+                  vorticity(c_Y,i,j-1,k,nsublist)*velocity(c_Y,i,j-1,k,nsublist) + 8.*&
+                  vorticity(c_Y,i,j+1,k,nsublist)*velocity(c_Y,i,j+1,k,nsublist) - &
+                  vorticity(c_Y,i,j+2,k,nsublist)*velocity(c_Y,i,j+2,k,nsublist)
 
-    h3 = product(step)
-    norm(c_X) = h3*sum(abs(field(c_X,1:resolution(c_X),1:resolution(c_Y),1:resolution(c_Z),nsublist)))
-    norm(c_Y) = h3*sum(abs(field(c_Y,1:resolution(c_X),1:resolution(c_Y),1:resolution(c_Z),nsublist)))
-    norm(c_Z) = h3*sum(abs(field(c_Z,1:resolution(c_X),1:resolution(c_Y),1:resolution(c_Z),nsublist)))
-  end subroutine norm1
+             ty(c_Z)=&
+                  vorticity(c_Y,i,j-2,k,nsublist)*velocity(c_Z,i,j-2,k,nsublist) - 8.*&
+                  vorticity(c_Y,i,j-1,k,nsublist)*velocity(c_Z,i,j-1,k,nsublist) + 8.*&
+                  vorticity(c_Y,i,j+1,k,nsublist)*velocity(c_Z,i,j+1,k,nsublist) - &
+                  vorticity(c_Y,i,j+2,k,nsublist)*velocity(c_Z,i,j+2,k,nsublist)
 
-  subroutine norm2(norm,field,resolution,step)
-    real(mk), dimension(dime), intent(out) :: norm
-    real(mk), dimension(:,:,:,:,:), pointer :: field
+             tz(c_X)=&
+                  vorticity(c_Z,i,j,k-2,nsublist)*velocity(c_X,i,j,k-2,nsublist) - 8.*&
+                  vorticity(c_Z,i,j,k-1,nsublist)*velocity(c_X,i,j,k-1,nsublist) + 8.*&
+                  vorticity(c_Z,i,j,k+1,nsublist)*velocity(c_X,i,j,k+1,nsublist) - &
+                  vorticity(c_Z,i,j,k+2,nsublist)*velocity(c_X,i,j,k+2,nsublist)
+
+             tz(c_Y)=&
+                  vorticity(c_Z,i,j,k-2,nsublist)*velocity(c_Y,i,j,k-2,nsublist) - 8.*&
+                  vorticity(c_Z,i,j,k-1,nsublist)*velocity(c_Y,i,j,k-1,nsublist) + 8.*&
+                  vorticity(c_Z,i,j,k+1,nsublist)*velocity(c_Y,i,j,k+1,nsublist) - &
+                  vorticity(c_Z,i,j,k+2,nsublist)*velocity(c_Y,i,j,k+2,nsublist)
+
+             tz(c_Z)=&
+                  vorticity(c_Z,i,j,k-2,nsublist)*velocity(c_Z,i,j,k-2,nsublist) - 8.*&
+                  vorticity(c_Z,i,j,k-1,nsublist)*velocity(c_Z,i,j,k-1,nsublist) + 8.*&
+                  vorticity(c_Z,i,j,k+1,nsublist)*velocity(c_Z,i,j,k+1,nsublist) -  &
+                  vorticity(c_Z,i,j,k+2,nsublist)*velocity(c_Z,i,j,k+2,nsublist)
+
+             stretch(:,i,j,k,nsublist) = facx*tx+facy*ty+facz*tz
+
+          end do
+       end do
+    end do
+  end subroutine computeStretch
+
+  function norm1(field,resolution,step)
+    real(mk), dimension(dime) :: norm1
+    real(mk), dimension(:,:,:,:), pointer :: field
     !> the local resolution
-    integer, dimension(dime),intent(in) :: resolution
+    integer, dimension(dim3),intent(in) :: resolution
     !> size of mesh step in each dir
-    real(mk), dimension(dime),intent(in) :: step
-    
+    real(mk), dimension(dim3),intent(in) :: step
+    real(mk),dimension(dime) :: buffer
     real(mk) :: h3
-    
-    h3 = product(step)
-    norm(c_X) = sqrt(h3*sum(abs(field(c_X,1:resolution(c_X),1:resolution(c_Y),1:resolution(c_Z),nsublist))**2))
-    norm(c_Y) = sqrt(h3*sum(abs(field(c_Y,1:resolution(c_X),1:resolution(c_Y),1:resolution(c_Z),nsublist))**2))
-    norm(c_Z) = sqrt(h3*sum(abs(field(c_Z,1:resolution(c_X),1:resolution(c_Y),1:resolution(c_Z),nsublist))**2))       
-  end subroutine norm2
-
-  subroutine normInf(norm,field,resolution,step)
-    real(mk), dimension(dime), intent(out) :: norm
-    real(mk), dimension(:,:,:,:,:), pointer :: field
+    integer :: i,info
+    integer,dimension(dim3) :: nn
+    nn = max(1,resolution-1)
+    h3 = product(step(1:dime))
+    do i=1,dime
+       buffer(i) = sum(abs(field(i,1:nn(c_X),1:nn(c_Y),1:nn(c_Z))))
+    end do
+    ! Norm is computed only on proc 0 (mpi_mk matches the real kind mk)
+    call MPI_Reduce(buffer,norm1,dime,mpi_mk,MPI_SUM,0,MPI_COMM_WORLD,info)
+    if(rank == 0) norm1 = h3*norm1
+
+  end function norm1
+
+  function norm2(field,resolution,step)
+    real(mk), dimension(dime) :: norm2
+    real(mk), dimension(:,:,:,:), pointer :: field
     !> the local resolution
-    integer, dimension(dime),intent(in) :: resolution
+    integer, dimension(dim3),intent(in) :: resolution
     !> size of mesh step in each dir
-    real(mk), dimension(dime),intent(in) :: step
-    
+    real(mk), dimension(dim3),intent(in) :: step
+    real(mk),dimension(dime) :: buffer
     real(mk) :: h3
+    integer :: i,info
+    integer,dimension(dim3) :: nn
+    h3 = product(step(1:dime))
+    nn = max(1,resolution-1)
+    buffer = 0.0
+    do i = 1,dime
+       buffer(i) = sum(abs(field(i,1:nn(c_X),1:nn(c_Y),1:nn(c_Z)))**2)
+    end do
+    ! Norm is computed only on proc 0 (mpi_mk matches the real kind mk)
+    call MPI_Reduce(buffer,norm2,dime,mpi_mk,MPI_SUM,0,MPI_COMM_WORLD,info)
+    if(rank == 0) norm2 = sqrt(h3*norm2)
     
-    h3 = product(step)
-    norm(c_X) = maxval(abs(field(c_X,1:resolution(c_X),1:resolution(c_Y),1:resolution(c_Z),nsublist)))
-    norm(c_Y) = maxval(abs(field(c_Y,1:resolution(c_X),1:resolution(c_Y),1:resolution(c_Z),nsublist)))
-    norm(c_Z) = maxval(abs(field(c_Z,1:resolution(c_X),1:resolution(c_Y),1:resolution(c_Z),nsublist)))
-    
-  end subroutine normInf
+  end function norm2
 
+  function normInf(field,resolution)
+    real(mk), dimension(dime) :: normInf
+    real(mk), dimension(:,:,:,:), pointer :: field
+    !> the local resolution
+    integer, dimension(dim3),intent(in) :: resolution
+    ! work buffer for the MPI reduction
+    real(mk),dimension(dime) :: buffer
+    integer :: i,info
+    do i = 1,dime
+       buffer(i) = maxval(abs(field(i,1:resolution(c_X),1:resolution(c_Y),1:resolution(c_Z))))
+    end do  
+    ! Norm is computed only on proc 0 (mpi_mk matches the real kind mk)
+    call MPI_Reduce(buffer,normInf,dime,mpi_mk,MPI_MAX,0,MPI_COMM_WORLD,info)
+
+  end function normInf
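The three norm helpers above compute per-component discrete norms on the local subdomain and reduce them onto rank 0, so the result is only meaningful on that process. Writing V for the product of the mesh steps (the code's h3), they evaluate
\[
\|f_c\|_1 = V \sum_{i,j,k} |f_c(i,j,k)|, \qquad
\|f_c\|_2 = \Big( V \sum_{i,j,k} f_c(i,j,k)^{2} \Big)^{1/2}, \qquad
\|f_c\|_\infty = \max_{i,j,k} |f_c(i,j,k)|
\]
for each component c = 1..dime. norm1 and norm2 sum over the local points 1..resolution-1 in each direction (the last point is presumably the periodic duplicate), while normInf takes the maximum over the full local range; the partial sums are combined with MPI_SUM (the square root and the V factor are applied after the reduction) and the maxima with MPI_MAX.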
+  
   function cross_prod(v1,v2)
     real(mk), dimension(dime), intent(in) :: v1
     real(mk), dimension(dime), intent(in) :: v2
diff --git a/HySoP/src/client_data.f90 b/HySoP/src/client_data.f90
new file mode 100755
index 000000000..83e833f6b
--- /dev/null
+++ b/HySoP/src/client_data.f90
@@ -0,0 +1,32 @@
+!> Some global parameters and variables
+module client_data
+
+  use MPI, only : MPI_DOUBLE_PRECISION, MPI_REAL,MPI_COMM_WORLD
+  use, intrinsic :: iso_c_binding ! required for fftw
+  implicit none 
+  
+  !> kind for real variables (simple or double precision)
+  integer, parameter :: mk = kind(1.0d0) ! double precision  
+  !integer, parameter :: mk = kind(1.0) ! simple precision  
+  !> kind for real variables in mpi routines
+  integer, parameter :: mpi_mk = MPI_DOUBLE_PRECISION
+  !integer, parameter :: mpi_mk = MPI_REAL
+  !> Problem dimension (model, required for ppm to work properly)
+  integer, parameter :: dime = 2
+  !> Real space dimension (coordinate/resolution arrays always have 3 components)
+  integer, parameter :: dim3 = 3
+  !> Pi constant
+  real(mk), parameter :: pi = 4.0*atan(1.0_mk)
+  !> Rank of the current mpi process
+  integer :: rank ! current mpi process rank
+  !> Total number of mpi processes
+  integer :: nbprocs
+  !>  trick to identify coordinates in a more user-friendly way 
+  integer,parameter :: c_X=1,c_Y=2,c_Z=3
+  !> to activate (or not) screen output
+  logical,parameter :: verbose = .True.
+  !> i (sqrt(-1) ...)
+  complex(C_DOUBLE_COMPLEX),parameter :: Icmplx = cmplx(0._mk,1._mk,kind=C_DOUBLE_COMPLEX)
+  
+  
+end module client_data
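A minimal, hypothetical usage sketch (not part of the patch; the program name and the driver itself are illustrative) of how these parameters fit together: mk fixes the precision of every real field, and mpi_mk must designate the matching MPI datatype so reductions operate on the right type.

program check_client_data
  ! Hypothetical driver, assuming an MPI library and the client_data module above.
  use mpi
  use client_data
  implicit none
  integer :: info
  real(mk) :: local_val, global_sum
  call MPI_Init(info)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank, info)
  call MPI_Comm_size(MPI_COMM_WORLD, nbprocs, info)
  ! Each process contributes pi/nbprocs; the reduction uses mpi_mk, the MPI
  ! datatype matching the real kind mk (both must be changed together).
  local_val = pi/real(nbprocs, mk)
  call MPI_Reduce(local_val, global_sum, 1, mpi_mk, MPI_SUM, 0, MPI_COMM_WORLD, info)
  if (rank == 0 .and. verbose) write(*,'(a,f12.8)') ' sum over processes = ', global_sum
  call MPI_Finalize(info)
end program check_client_data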
diff --git a/HySoP/src/main/io_vtk.f90 b/HySoP/src/io_vtk.f90
similarity index 78%
rename from HySoP/src/main/io_vtk.f90
rename to HySoP/src/io_vtk.f90
index e83e5a80b..52cfe1413 100755
--- a/HySoP/src/main/io_vtk.f90
+++ b/HySoP/src/io_vtk.f90
@@ -54,7 +54,7 @@ contains
     write(11,'(A7)')"vecteur"
     write(11,'(A5)')"ASCII"
     write(11,'(A25)')"DATASET STRUCTURED_POINTS"
-    write(11,'(A10,3(i4,1x))')"DIMENSIONS",nx,ny,nz
+    write(11,'(A10,3(i6,1x))')"DIMENSIONS",nx,ny,nz
     write(11,'(a6,3(f10.5))')"ORIGIN", coordMin
     write(11,'(A7,3(f10.5))')"SPACING",spacing
     write(11,'(A10,i10)') "POINT_DATA",nbpoints
@@ -63,7 +63,7 @@ contains
     write(12,'(A7)')"vecteur"
     write(12,'(A5)')"ASCII"
     write(12,'(A25)')"DATASET STRUCTURED_POINTS"
-    write(12,'(A10,3(i4,1x))')"DIMENSIONS",nx,ny,nz
+    write(12,'(A10,3(i6,1x))')"DIMENSIONS",nx,ny,nz
     write(12,'(a6,3(f10.5))')"ORIGIN", coordMin
     write(12,'(A7,3(f10.5))')"SPACING",spacing
     write(12,'(A10,i10)') "POINT_DATA",nbpoints
@@ -89,6 +89,45 @@ contains
   subroutine printChiToVTK(filename,testFunc,resolution,spacing,coordMin)
     character(len=*), intent(in) :: filename
     real(mk), dimension(:,:,:), pointer :: testFunc
+    real(mk), dimension(dim3),intent(in) :: spacing
+    !> Number of points in each dir (1st index) for each sub (2nd index)
+    integer, dimension(dim3),intent(in)  :: resolution
+    real(mk),dimension(dim3),intent(in)::coordMin
+ 
+    integer :: nbpoints
+    ! sub proc number
+    ! local resolution
+    integer :: nx,ny,nz
+    
+    !local filename (depends on subproc number)
+    character(len=30) :: localname,buffer
+
+    ! local number of points in each direction
+    nx = resolution(1)
+    ny = resolution(2)
+    nz = resolution(3)
+    nbpoints = nx*ny*nz
+    write(buffer, *) rank
+    buffer = adjustl(buffer)
+    localname = trim(filename)//"_chi_"//trim(buffer)//".vti"
+    open(unit=11,file=localname,form="formatted")
+    write(11,'(A26)')"# vtk DataFile Version 3.0"
+    write(11,'(A8)')"scalaire"
+    write(11,'(A5)')"ASCII"
+    write(11,'(A25)')"DATASET STRUCTURED_POINTS"
+    write(11,'(A10,3(i8,1x))')"DIMENSIONS",nx,ny,nz
+    write(11,'(a6,3(f10.5))')"ORIGIN", coordMin
+    write(11,'(A7,3(f10.5))')"SPACING",spacing
+    write(11,'(A10,i10)') "POINT_DATA",nbpoints
+    write(11,'(A21)') "SCALARS chi FLOAT"
+    write(11,'(A21)') "LOOKUP_TABLE default" 
+    write(11,'(f20.9)') testFunc(1:nx,1:ny,1:nz)
+
+  end subroutine printChiToVTK
+  
+  subroutine printScalarToVTK(filename,scalar,resolution,spacing,coordMin)
+    character(len=*), intent(in) :: filename
+    real(mk), dimension(:,:,:,:), pointer :: scalar
     real(mk), dimension(dime),intent(in) :: spacing
     !> Number of points in each dir (1st index) for each sub (2nd index)
     integer, dimension(dime),intent(in)  :: resolution
@@ -119,11 +158,11 @@ contains
     write(11,'(a6,3(f10.5))')"ORIGIN", coordMin
     write(11,'(A7,3(f10.5))')"SPACING",spacing
     write(11,'(A10,i10)') "POINT_DATA",nbpoints
-    write(11,'(A21)') "SCALARS chi FLOAT"
+    write(11,'(A21)') "SCALARS scal FLOAT"
     write(11,'(A21)') "LOOKUP_TABLE default" 
-    write(11,'(3(f20.9))') testFunc(1:nx,1:ny,1:nz)
+    write(11,'(3(f20.9))') scalar(1:nx,1:ny,1:nz,1)
 
-  end subroutine printChiToVTK
+  end subroutine printScalarToVTK
 
   subroutine printPvtkFile(filename,spacing,coordMin,coordMax)
     
diff --git a/HySoP/src/main/Domain.f90 b/HySoP/src/main/Domain.f90
deleted file mode 100755
index f47f23319..000000000
--- a/HySoP/src/main/Domain.f90
+++ /dev/null
@@ -1,80 +0,0 @@
-!> Physical domain and grid
-!! In this file we set the size of the physical domain, the grid resolution and so on.
-module Domain
-
-  use client_data
-  
-  implicit none
-
-  private
-  public init_geometry, init_grid
-  public falseLowerPoint,falseUpperPoint,lowerPoint,upperPoint,grid_resolution,grid_step,domain_ghostsize,lengths,domain_bc
-
-  !> Computational domain limits
-  real(mk), dimension(:), pointer :: falseLowerPoint =>NULL(), falseUpperPoint=>NULL()
-  !> Real domain limits
-  real(mk), dimension(dime) :: lowerPoint,upperPoint
-  !> Sizes of computational domain
-  real(mk), dimension(:), pointer :: lengths =>NULL()
-  !> Boundary conditions for the computational domain
-  integer,  dimension(:), pointer :: domain_bc=>NULL()
-  !> Number of ghost points in each direction
-  integer,  dimension(:), pointer :: domain_ghostsize=>NULL()
-  !> Grid resolution
-  integer,  dimension(:), pointer :: grid_resolution =>NULL()
-  !> Grid space step sizes
-  real(mk),  dimension(:), pointer :: grid_step =>NULL()
-  
-  
-  integer, private :: istat
-
-contains
-  
-  subroutine init_geometry(bc)
-    
-    real(mk) :: zFolke,layer
-    integer, intent(in) :: bc
-
-    zFolke = 1./0.4844
-    layer = 0.1
-
-    ! Domain size and min/max coords
-    allocate(falseLowerPoint(dime), falseUpperPoint(dime), lengths(dime),stat = istat)
-    if(istat.ne.0) stop 'Geometry, allocation failed'
-    
-    falseLowerPoint(1) = 0.0  
-    falseUpperPoint(1) = 2.*zFolke
-    falseLowerPoint(2) = -zFolke
-    falseUpperPoint(2) =  zFolke
-    ! layer: boundary layer 
-    falseLowerPoint(3) = -zFolke - layer
-    falseUpperPoint(3) =  zFolke + layer
-
-    ! Boundary conditions and ghosts
-    allocate(domain_bc(2*dime), domain_ghostsize(dime), stat = istat)
-    if(istat.ne.0) stop 'BC, allocation failed'
-    domain_bc = bc
-    domain_ghostsize = 2
-    lengths = falseUpperPoint - falseLowerPoint
-    ! set boundary layer(s) positions
-    lowerPoint(1:2) = falseLowerPoint(1:2)
-    upperPoint(1:2) = falseUpperPoint(1:2)
-    lowerPoint(3) = falseLowerPoint(3) + layer
-    upperPoint(3) = falseUpperPoint(3) - layer
-
-    ! Note Franck : "domain_min/maxCoords correspond to the computational domain and 
-    ! lowerPoint/max to the "physical domain".
-    ! ==> computational domain is used to compute velocity from vorticity using fftw (requires periodicity)
-    ! ==> physical domain is a 'reduction' of computational domain used to enforce boundary conditions (with penalisation) after the fft.  
-
-  end subroutine init_geometry
-
-  subroutine init_grid()
-    
-    allocate(grid_resolution(dime),grid_step(dime),stat=istat)
-    if(istat.ne.0) stop 'grid_resolution, allocation failed'
-    grid_resolution = 65
-    grid_step = (falseUpperPoint - falseLowerPoint)/(real(grid_resolution,mk)-1.)
-  end subroutine init_grid
-
-end module Domain
diff --git a/HySoP/src/main/Fields.f90 b/HySoP/src/main/Fields.f90
deleted file mode 100755
index c87cf904b..000000000
--- a/HySoP/src/main/Fields.f90
+++ /dev/null
@@ -1,67 +0,0 @@
-!> Declaration, allocation of all the fields on the grid.
-module Fields
-  
-  use client_data
-  use client_topology, only: nsublist
-
-  implicit none
-
-  !> Velocity 
-  real(mk), dimension(:,:,:,:,:), pointer :: velocity => NULL()
-  !> Vorticity
-  real(mk), dimension(:,:,:,:,:), pointer :: vorticity =>NULL()
-  !> Stream function - Test purpose. Useless if ppm fft solver works as it is supposed to ...
-  ! real(mk), dimension(:,:,:,:,:), pointer :: stream_function =>NULL()
-  !> rhs of vorticity eq (i.e. stretch + diffusion terms)
-  real(mk), dimension(:,:,:,:,:), pointer :: rhs =>NULL()
-  !> 
-  !real(mk), dimension(:,:,:,:,:), pointer :: vel_ex => NULL()
-  !> Scalar on the grid, test purpose for chi functions
-  real(mk),dimension(:,:,:),pointer::testFunc=>NULL()
-  
-contains
-  
-  !> Fields allocation. 
-  !! Warning : ghostpoints must be included in field (i.e. size = "real size" + 2*number of ghostpoints)
-  subroutine init_fields(resolution,ghostsize)
-    
-    !> Required resolution for the fields (without ghosts)
-    integer, dimension(dime), intent(in) :: resolution
-    !> number of ghost points in each direction (ghost(c_X) = 2 means resolution(c_X)+ 4 points for the field)
-    integer, dimension(:),pointer:: ghostsize
-    
-    integer::istat
-    ! Lower and upper bounds for fields
-    integer, dimension(dime) :: ldl, ldu
-    ! nsublist from ppm topo. Assumed to be equal to 1 see Topology.f90
-    
-    ldl = 1 - ghostsize
-    ldu = resolution + ghostsize
-    ! Velocity ...
-    allocate(velocity(dime,ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3),nsublist),stat = istat)
-    if(istat.ne.0) stop 'Field allocation error for velocity'
-
-    ! Vorticity
-    allocate(vorticity(dime,ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3),nsublist), stat = istat)
-    if(istat.ne.0) stop 'Field allocation error for vorticity'
-    ! rhs
-    allocate(rhs(dime,ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3),nsublist), stat = istat)
-    if(istat.ne.0) stop 'Field allocation error for rhs'
-    !allocate(stream_function(dime,ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3),nsublist), stat = istat)
-    !if(istat.ne.0) stop 'stream_function allocation error for rhs'
-    allocate(testFunc(ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3)), stat = istat)
-    if(istat.ne.0) stop 'Field allocation error for testFunc'
-  end subroutine init_fields
-
-  !> compute the size of the memory used to save fields
-  function getMemoryUsedForFields()
-    real(mk) :: getMemoryUsedForFields 
-    getMemoryUsedForFields = sizeof(velocity)+sizeof(vorticity)+sizeof(rhs)+sizeof(testfunc)
-    getMemoryUsedForFields = getMemoryUsedForFields*1.e-6
-    if(verbose) then
-       write(*,'(a,i3,a,f10.4,a)') &
-            '[',rank,'] Fields have been initialized.  Memory used :', getMemoryUsedForFields, ' MB.'
-    end if
-  end function getMemoryUsedForFields
-
-end module Fields
diff --git a/HySoP/src/main/NavierStokes2D.f90 b/HySoP/src/main/NavierStokes2D.f90
new file mode 100755
index 000000000..a2e9b6782
--- /dev/null
+++ b/HySoP/src/main/NavierStokes2D.f90
@@ -0,0 +1,603 @@
+!> This module is used to run a simulation using 
+!! ppm (core and numerics).
+!! Solves Navier-Stokes (vorticity formulation) for a 2D flow around a cylinder.
+!!
+module NavierStokes2D
+  
+  ! All required ppm modules ...
+  use ppm_module_init, only : ppm_init
+  use ppm_module_data, only : ppm_kind_double,ppm_param_bcdef_periodic
+  use ppm_module_finalize, only : ppm_finalize
+  use ppm_module_map_field_ghost, only:ppm_map_field_ghost_get, ppm_map_field_ghost_put
+  use ppm_module_map_field
+  use ppm_module_is_initialized
+  ! use client_io
+  use client_data
+  ! some tools
+  use parmesTools
+  ! Physical domain and grid
+  use Domain
+  ! Fields on the grid
+  use Fields, only: initFields, velocity, vorticity2D, gauss,rhs,scalar!,testFunc!,getMemoryUsedForFields,shiftVelocityX, gauss!vel_ex)!, stream_function
+  use PPMFields
+  ! Topology
+  use client_topology, only: PPMinitTopo,topo,getPPMLocalResolution,meshNum
+  ! Poisson (ppm) solver
+  use Solver, only : init_poisson_solver, solve_poisson, ppm_poisson_drv_none, ppm_poisson_drv_curl_fd2, ppm_poisson_drv_curl_fd4
+  ! Penalisation stuff
+  use penalisation, only : penalise_velocity
+  ! Functions used to identify surfaces, volumes ... 
+  use SetsIndicators,only:chi_sphere,chi_boundary,compute_control_box,init_obstacles,nocaForces,&
+       compute_test,chi_box,getMemoryForIndicators
+  ! curl, prod ...
+  use vectorcalculus
+  ! everything dealing with particles
+  use Particles!, only : initNSSolver_particles,getMemoryUsedForParticles,countAndUpdateParticles,countAndCreateParticles,RK4_2D,&
+  !     createParticlesEverywhere,PPMupdateParticles2D,Ppmremesh2d,RK2_2D
+  ! file io 
+  use io_vtk
+  
+  use mpi
+  
+  ! user-defined functions (for tests or fields initialization)
+  use testsFunctions
+
+  use poisson
+
+!  use solverDiffusion
+
+  implicit none
+
+  ! Some global vars ... 
+  !> counter for memory usage
+  real(mk) :: memoryUsed
+  !> required flow rate (used to shift velocity after Poisson)
+  real(mk) :: reqFlowRate
+  
+contains
+
+  !> All required initialisations :
+  !! MPI, ppm
+  !! Domain, grid
+  !! Fields
+  !! Particles
+  subroutine init_client(Re,localResolution,coordMin)
+
+    use client_topology, only: isubl
+    !> Reynolds number
+    real(mk),intent(in) ::Re
+    !> local mesh resolution
+    integer, dimension(dim3),intent(out) :: localResolution
+    !> Local (proc.) minimum point coordinates
+    real(mk),dimension(dim3),intent(out) ::coordMin
+
+    ! Precision and tolerance (for ppm init)
+    integer :: prec,tol
+    ! MPI comm
+    integer :: comm
+    ! debug mode (for ppm)
+    integer :: debug
+    
+    !> Size of the control box (user defined)
+    real(mk) :: sizeOfTheBox  
+    !> Dimensions and positions of the control volume
+    real(mk),dimension(dime):: boxMin,boxMax
+    
+    !> For ppm fftw solver
+    integer :: derive
+    
+    !> Sphere radius
+    real(mk) :: sphere_radius 
+    !> Sphere position
+    real(mk),dimension(dime) :: sphere_pos
+    !> velocity inf
+    real(mk),parameter :: uinf = 1.0
+    integer :: nbPoints
+
+    !> Thickness of the boundary layer used to enforce Dirichlet BC with penalisation
+    real(mk) :: layer
+
+    logical :: testInit = .False.
+    integer :: info
+
+    ! "read" mpi parameters
+    call MPI_COMM_RANK(MPI_COMM_WORLD,rank,info)
+    call MPI_COMM_SIZE(MPI_COMM_WORLD,nbprocs,info)
+    comm = MPI_COMM_WORLD  
+    
+    write (*,'(a,i5,a)') '[',rank,'] --------------------------------------------------------------> start initialization '
+
+    !======================
+    ! Init ppm 
+    !======================
+    prec = ppm_kind_double 
+    debug = 0
+    tol = -10
+    info = 0 !!! Warning : info MUST be 0 on input, otherwise ppm_init fails silently
+    call ppm_init(dime,prec,tol,comm,debug,info)
+    
+    !======================
+    ! Geometry and grid
+    !======================
+    ! Set domain size, resolution, boundaries, number of ghost points ...
+    ! For the time being, most of these values are hard-coded in Domain.f90
+    call init_geometry(ppm_param_bcdef_periodic)
+    call init_grid()
+
+    !======================
+    ! Creates the topology
+    !======================
+    ! Based on ppm. 
+    call PPMinitTopo(physDomainLowerPoint,physDomainUpperPoint,domain_bc,domain_ghostsize,grid_resolution)
+    
+    !> Get the coordinates of the lowest point for the current domain
+    coordMin(:)=topo%min_subd(:,isubl) 
+    coordMin(c_Z) = physDomainUpperPoint(c_Z)
+    !======================
+    ! Fields allocation
+    !======================
+    ! Local number of nodes
+    localResolution = getPPMLocalResolution(topo,meshNum)
+
+    ! Allocate fields on the local mesh (velocity, vorticity ...)
+    call initFields(localResolution,domain_ghostsize)
+    
+    !=======================================================
+    ! Set obstacle (sphere) and other specific parameters.
+    ! Compute a control volume for diagnostics computation
+    !=======================================================
+    !> Set sphere size and position
+!!$    sphere_radius=0.5
+!!$    sphere_pos(c_Y)=(physDomainUpperPoint(c_Y)+physDomainLowerPoint(c_Y))/2.0
+!!$    sphere_pos(c_X) = 0.0
+!!$ !   sphere_pos(c_Z) = 0.0
+!!$    
+!!$    if(rank == 0) then
+!!$       reqFlowRate = requiredFlowRate2D(sphere_radius,domainLength,physDomainLowerPoint,physDomainUpperPoint,uinf)
+!!$    end if
+!!$    call MPI_Bcast(reqFlowRate,1,mpi_mk,0,MPI_COMM_WORLD,info);
+
+    !! We set the size of the box (must be greater than the sphere diameter ...)
+    !    sizeOfTheBox = domainLength(c_Y)-0.3 ! 6*sphere_radius
+    
+    !! Position of the upper and lower points of the box
+    !! Compute the box position on the grid
+    !nbPoints = floor(abs(0.5*(domainLength(c_Z)-sizeOfTheBox)/grid_step(c_Z)))
+    !boxMin=physDomainLowerPoint!+3*grid_step
+    !boxMax=physDomainUpperPoint!-3*grid_step
+!!$    nbPoints = 200
+!!$
+!!$    boxMin = physDomainLowerPoint
+!!$    boxMax = physDomainUpperPoint
+!!$    boxMin(c_Y)=physDomainLowerPoint(c_Y)+ nbPoints*grid_step(c_Y)
+!!$    boxMax(c_Y)=physDomainUpperPoint(c_Y)- nbPoints*grid_step(c_Y)
+!!$    boxMin(c_X)=physDomainLowerPoint(c_X)+ nbPoints*grid_step(c_X)
+!!$    boxMax(c_X)=physDomainUpperPoint(c_X)- (nbPoints+401)*grid_step(c_X)
+!!$    
+    !! compute indicator functions for the control box and the sphere
+  !  call compute_control_box(localResolution,grid_step,boxMin,boxMax,sphere_pos,sphere_radius,coordMin)
+    
+    !====================================================================
+    ! Compute indicator function for the boundaries in z dir 
+    ! (will be used to enforce dirichlet conditions on these boundaries.)
+    !====================================================================
+    layer = 0.0
+   ! call init_obstacles(localResolution,grid_step,physDomainLowerPoint,physDomainUpperPoint,sphere_pos,sphere_radius,layer,coordMin)
+    ! Test : export test function to vtk file.
+    ! compute_test set 1 at all points belonging to chi_... 
+ !   testFunc=0.0
+    !call compute_test(testFunc,chi_boundary)
+    !call compute_test(testFunc,chi_box)
+    !testFunc = 1.0
+    !call printChiToVTK("box",testFunc,localResolution,grid_step,coordMin)
+    !! display the memory used for indicator sets and fields
+ !   memoryUsed=getMemoryUsedForFields()+getMemoryForIndicators()
+!    deallocate(testFunc)
+   
+    !================================
+    ! Solvers (grid and particles) 
+    !================================
+    
+    ! --- Grid solver for Poisson ---
+    ! derive : 4th-order finite differences are used in ppm to compute velocity from the stream function
+    !derive = ppm_poisson_drv_curl_fd4 
+    ! init fftw solver, from ppm
+    !call init_poisson_solver(vorticity,velocity,topo%ID,meshNum,derive)
+    
+    ! Initialisation of all the fftw stuff (plans ...).
+    ! Warnings : 
+    ! - this must be done before any initialisation of the fields : plan creation may overwrite the data in fieldIn/fieldOut.
+    ! - the last argument (resolution) must be the global resolution : dedicated mpi topologies are created for fftw.
+    call initFFTW2D(grid_resolution,domainLength)
+    
+    !call diffusionInitFFT(topo%ID,velocity,vorticity,1./Re,domainLength,grid_resolution,physDomainLowerPoint,&
+     !    physDomainUpperPoint,domain_bc)
+    ! --- Particles ---
+    ! Set particles parameters (kernel type, cutoff ...)
+    call initNSSolver_particles()
+    
+    if(verbose) then
+       write(*,'(a,i5,a)') '[',rank,'] ======================================== Summary ========================================'
+       if (rank==0) then
+          write(*,'(a,i5,a,3f10.4)') '[',rank,'] Computational domain (geom) dimensions : ', domainLength
+          write(*,'(a,i5,a,3d20.10)') '[',rank,'] Space step : ', grid_step
+          write(*,'(a,i5,a,f10.4)') '[',rank,'] Reynolds : ', Re
+          write(*,'(a,i5,a,f10.4,3f10.4)') '[',rank,'] Sphere radius and position : ', sphere_radius,sphere_pos
+          write(*,'(a,i5,a,f10.4)') '[',rank,'] The required flow rate is : ', reqFlowRate
+          write(*,'(a,i5,a,i5)') '[',rank,'] Number of points in the boundary layer: ', nbpoints
+       end if
+       write(*,'(a,i5,a,3f10.5)') '[',rank,'] Current subdomain (geom) position : ', coordMin
+       write(*,'(a,i5,a,3i10)') '[',rank,'] Current subdomain resolution : ', localResolution
+       write(*,'(a,i5,a)') '[',rank,'] ========================================================================================='
+    end if
+    
+    write (*,'(a,i5,a)') '[',rank,'] --------------------------------------------------------------> end of initialization'
+
+  end subroutine init_client
+ 
+  subroutine main_client() bind(c,name='NavierStokes2D')
+
+    real(mk) :: initial_time,final_time
+    real(mk) :: Re
+    real(mk) :: t1
+    real(mk), dimension(dim3) :: coordMin
+    !> local mesh resolution
+    integer, dimension(dim3) :: localResolution
+
+    real(mk) :: shift = 0.0
+    real(mk),dimension(dime)::coord
+    integer :: i,info,j
+    character(len=30) :: buffer
+    real(mk),dimension(dime)::center
+    Re = 133.4! 210.6
+    initial_time = 0.0
+    final_time = 100.0
+   ! Initialisation stuff 
+    t1 = MPI_WTIME()
+    call init_client(Re,localResolution,coordMin)
+    write (*,'(a,i5,a,f10.5)')  '[',rank,'] Initialisation time: ', MPI_WTIME()-t1
+
+    
+    do j=1,localResolution(c_Y)
+       coord(c_Y) = coordMin(c_Y) + (j-1)*grid_step(c_Y)
+       do i =1,localResolution(c_X)
+          coord(c_X) = coordMin(c_X) + (i-1)*grid_step(c_X)
+          vorticity2D(i,j,1) = -4.*pi*pi*cos(2.*pi*coord(c_X)/domainLength(c_X))/(domainLength(c_X)**2)! + cos(2.*pi*coord(c_Y)/domainLength(c_Y)) 
+       end do
+    end do
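The loop above fills vorticity2D with a manufactured field that depends on x only. Since
\[
\omega(x) = -\Big(\frac{2\pi}{L_x}\Big)^{2} \cos\!\Big(\frac{2\pi x}{L_x}\Big)
          = \frac{d^{2}}{dx^{2}}\cos\!\Big(\frac{2\pi x}{L_x}\Big),
\]
the Poisson problem solved just below has a closed-form solution: a cosine stream function and a velocity profile proportional to sin(2*pi*x/L_x), up to the solver's sign convention, which presumably serves as the analytical reference for checking solvePoisson.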
+
+    PPMvelocity2D(c_Y,:,:,:) = 0.0
+    !rhs(:,:,:,:) = velocity(:,:,:,:)
+    !vorticity2D(:,:,:) = 1.0!velocity(:,:,:,:)
+    !vorticity2D(1:localResolution(c_X)-1,1:localResolution(c_Y)-1,1) = 0.0
+    !print *, "N2",norm2(velocity,localResolution,grid_step)!)velocity(c_X,:,:,:)
+    !print *, "N2 v", norm2(vorticity2D,localResolution,grid_step)!)vorticity(c_X,:,:,:)
+    call solvePoisson(vorticity2D,velocity,grid_resolution,topo%ID,meshNum)
+    print *, "end of poisson solve"
+!!$    call MPI_BARRIER(MPI_COMM_WORLD,info)
+!!$
+!!$    print *, "post N2",norm2(velocity,localResolution,grid_step)!)velocity(c_X,:,:,:)
+!!$    !print *, "post N2 v", norm2(vorticity2D,localResolution,grid_step)!)vorticity(c_X,:,:,:)
+!!$    do i = 1, localResolution(c_X)
+!!$       print *,"------------", velocity(c_X,i,:,1)
+!!$       print *,"************", vorticity2D(i,:,1)
+!!$    end do
+!!$
+!!$
+    
+    !vorticity2D = vorticity - rhs
+    !print *, "results :", norm1(vorticity,localResolution,grid_step)
+
+
+
+
+!    call Mpi_barrier(MPI_COMM_WORLD,info)
+
+
+    write(buffer,*) rank
+    buffer = adjustl(buffer)
+    buffer = "gauss"//buffer
+    open(43,file=buffer) ! One output file per mpi process (name suffixed with the rank)
+    do j=1,localResolution(c_Y)
+       coord(c_Y) = coordMin(c_Y) + (j-1)*grid_step(c_Y)
+    !   do i =1,localResolution(c_X)
+          !coord(c_X) = coordMin(c_X) + (i-1)*grid_step(c_X)
+       write(43,'(4e14.5)') coord(c_Y),velocity(c_X,5,j,1) 
+     !  end do
+    end do
+    
+   close(43)
+!!$    shift = 0!-500.*1e-3
+!!$    ! init a Gaussian
+!!$    center = 0.0
+!!$    !call Gaussian1D(scalar,localResolution,grid_step,coordMin,c_X,shift)
+!!$!!    call Gaussian2D(scalar,localResolution,grid_step,coordMin,center)
+!!$    center(c_X) = 100.*1e-3
+    !shift = 100.*1e-3
+    !    call Gaussian1D(gauss,localResolution,grid_step,coordMin,c_X,shift)
+!!$    call Gaussian2D(gauss,localResolution,grid_step,coordMin,center)
+!!$    call ppm_map_field_ghost_get(topo%ID,meshNum,domain_ghostsize,info)
+!!$    call ppm_map_field_push(topo%ID,meshNum,PPMscalar2D,info)
+!!$    call ppm_map_field_send(info)
+!!$    call ppm_map_field_pop(topo%ID,meshNum,PPMscalar2D,domain_ghostsize,info)
+    ! Time loop 
+!    call timeLoop(initial_time,final_time,Re,localResolution,coordMin)
+!!$    rhs = 0.0
+!!$    rhs(1,:,:,:) = gauss - scalar
+!!$    
+!!$    print *, rank,"norms ", norm1(rhs,localResolution,grid_step),norm2(rhs,localResolution,grid_step),normInf(rhs,localResolution)
+!!$    !print *, "jzaiziaui", rank,rhs(1,65,12,1),rhs(1,1,12,1)
+!!$    !do j = 1, localResolution(c_Y)
+!!$    j = 43
+!!$       coord(c_Y) =coordMin(c_Y) + (j-1)*grid_step(c_Y) 
+!!$       do i = 1,localResolution(c_X)
+!!$          coord(c_X) = coordMin(c_X) + (i-1)*grid_step(c_X)
+!!$          if(gauss(i,j,1)< 1e-5) gauss(i,j,1)=0.0
+!!$          if(scalar(i,j,1)< 1e-5) scalar(i,j,1)=0.0
+!!$          write(43,'(4e14.5)') coord(c_X),coord(c_Y),real(gauss(i,j,1)),real(PPMscalar2D(i,j,1))
+!!$       end do
+    !end do
+
+!!$    print *,"max diff", maxloc(rhs),maxval(rhs)
+!!$    close(43)
+    !call printToVTK("gauss",0,gauss,vorticity,localResolution,grid_step,coordMin)
+    
+    ! Close everything related to ppm
+    call ppm_finalize(info)
+    write (*,'(a,i5,a)')  '[',rank,'] ==================================== End of simulation. ===================================='
+
+  end subroutine main_client
+
+
+  !> Simulation over time ...
+  subroutine timeLoop(initial_time,final_time,Re,resolution,coordMin)
+
+    !> Starting time
+    real(mk), intent(in) :: initial_time
+    !> Ending time
+    real(mk), intent(in) :: final_time
+    !> Reynolds number
+    real(mk), intent(in) :: Re
+    !> Local mesh resolution
+    integer, dimension(dim3) :: resolution
+    !> coordinates of the lowest point of the domain
+    real(mk),dimension(dim3),intent(in) :: coordMin
+
+    real(mk) :: current_time,elapsed_time,time_step
+    integer :: iter,i,j,k
+    logical :: resetpos
+    real(mk) :: nu,dtMax
+    !    real(mk), dimension(:,:), pointer :: err1, err2, errinf
+    ! diagnostics 1:3 -> drag/lifts "porous" drag, 4:6 Winkelmans paper method
+    !    real(mk), dimension(dime) :: diagnostics 
+    real(mk),dimension(dime)::force
+
+    real(mk)::dvol,x,z,y
+    real(mk),dimension(dime) :: norm2Vel,nref
+    integer :: info
+    integer,parameter :: maxiter = 1
+    dvol=product(grid_step)
+    nu = 1./Re!0.5*FolkeRatio/Re!*uinf
+    current_time = initial_time
+    iter = 1
+    
+    ! Max value allowed for the time step
+    dtMax = 1e-3!0.01!grid_step(1)**2/(6.*nu)
+    print *, "dtmax", dtMax
+    ! offset for velocity to set a desired average value for flow rate.
+    ! Initial time step
+    time_step=dtMax
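For reference, the commented-out bound grid_step(1)**2/(6.*nu) is the usual stability limit of an explicit scheme for the diffusion term in d space dimensions,
\[
\Delta t \;\le\; \frac{h^{2}}{2\,d\,\nu},
\]
with d = 3 giving the factor 6; here it is simply replaced by the fixed value dtMax = 1e-3.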
+
+    if(rank==0) open(10,file='diagnostics') ! Output only for one process
+    
+    !! Compute a first distribution of velocity and vorticity
+    !call computeInitialFields(time_step,resolution,grid_step,coordMin)
+!
+    ! Synchronise all procs after initialization process ...
+    call MPI_BARRIER(MPI_COMM_WORLD,info)
+    write (*,'(a,i5,a)')  '[',rank,'] -------------------------------------------------------------->  start simulation'
+    
+    resetpos = .TRUE.
+    !current_time=final_time
+    !do while(current_time <= final_time)
+    do while(iter <= maxiter)
+       elapsed_time = MPI_Wtime()
+       ! Perturbation step, only required for high Reynolds number 
+       ! if( current_time>3.0 .and. current_time<4.0) then
+       !   call perturb(velocity, current_time)
+       ! end if
+       
+       !============================================================ 
+       ! Compute the diagnostics, Noca method
+       !============================================================ 
+       !call nocaForces(force,velocity,vorticity,nu,coordMin,grid_step,time_step,dvol)
+       
+       if(rank == 0) then
+          write(*,'(i5,a,3f10.5)') iter, ' drag: ', force
+          write(10,'(11e14.5)') current_time,force
+       end if
+
+       !============================================================ 
+       ! Solve Navier-Stokes using a particle method
+       !============================================================ 
+       call PPMupdateParticles2DScalar(PPMscalar2D,.true.,topo%ID,meshNum,PPMvelocity2D)
+       call RK2_2DScalar(time_step,topo%ID,meshNum,domain_ghostsize,PPMvelocity2D)
+       call PPMremesh2DScalar(topo%ID,meshNum,domain_ghostsize,PPMscalar2D)
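Schematically, the three calls above perform one transport step of the scalar with a particle method: particles carrying the grid values are advected with a second-order Runge-Kutta scheme and then remeshed onto the grid. Assuming RK2_2DScalar implements the usual midpoint variant, the particle update reads
\[
x_p^{\,n+1} = x_p^{\,n} + \Delta t\; u\!\Big(x_p^{\,n} + \tfrac{\Delta t}{2}\, u(x_p^{\,n})\Big),
\]
after which PPMremesh2DScalar redistributes the carried quantity to the neighbouring grid points through a remeshing kernel.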
+         
+       !============================================================ 
+       ! Solve Poisson for the new vorticity --> velocity
+       !============================================================ 
+       ! Compute velocity from vorticity
+       ! Two steps:
+       ! - solve Poisson for the stream function and derive the velocity from it : this is entirely
+       !   handled inside the ppm routine, so stream_function never has to be manipulated here.
+       ! - shift the velocity to match the required flow rate
+       ! Solve Poisson to find the velocity
+!!$       call solve_poisson(vorticity,velocity,topo%ID,meshNum,domain_ghostsize)
+!!$       call ppm_map_field_ghost_get(topo%ID,meshNum,domain_ghostsize,info)
+!!$       call ppm_map_field_push(topo%ID,meshNum,velocity,3,info)
+!!$       call ppm_map_field_send(info)
+!!$       call ppm_map_field_pop(topo%ID,meshNum,velocity,3,domain_ghostsize,info)
+!!$       print *, "POST poisson", minval(vorticity),maxval(vorticity),minval(velocity),maxval(velocity)
+!!$       print *, 'veloc', sum(velocity(c_X,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(velocity(c_Y,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(velocity(c_Z,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1))
+!!$       print *, 'vort',sum(vorticity(c_X,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(vorticity(c_Y,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(vorticity(c_Z,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1))
+
+!!$
+!!$       call shiftVelocityX(reqFlowRate,grid_step,resolution,domainLength(c_Y)*domainLength(c_Z),coordMin,physDomainLowerPoint)
+!!$       call ppm_map_field_ghost_get(topo%ID,meshNum,domain_ghostsize,info)
+!!$       call ppm_map_field_push(topo%ID,meshNum,velocity, 3, info)
+!!$       call ppm_map_field_send(info)
+!!$       call ppm_map_field_pop(topo%ID,meshNum,velocity,3,domain_ghostsize,info)
+
+!!$       print *, "POST shift", minval(vorticity),maxval(vorticity),minval(velocity),maxval(velocity)
+!!$       print *, 'veloc', sum(velocity(c_X,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(velocity(c_Y,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(velocity(c_Z,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1))
+!!$       print *, 'vort',sum(vorticity(c_X,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(vorticity(c_Y,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(vorticity(c_Z,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1))
+
+       !============================================================ 
+       ! Penalize velocity on the sphere and on the boundaries
+!!$       !============================================================ 
+!!$       call penalise_velocity(velocity,time_step,chi_sphere,chi_boundary)
+!!$
+!!$       call ppm_map_field_ghost_get(topo%ID,meshNum,domain_ghostsize,info)
+!!$       call ppm_map_field_push(topo%ID,meshNum,velocity, 3, info)
+!!$       call ppm_map_field_send(info)
+!!$       call ppm_map_field_pop(topo%ID,meshNum,velocity,3,domain_ghostsize,info)
+!!$       
+!!$       print *, "POST penal", minval(vorticity),maxval(vorticity),minval(velocity),maxval(velocity)
+!!$       print *, 'veloc', sum(velocity(c_X,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(velocity(c_Y,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(velocity(c_Z,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1))
+!       call printToVTK("post-penal",iter,velocity,vorticity,resolution,grid_step,coordMin)
+       
+       
+       !============================================================ 
+       ! Compute the new "penalized" vorticity
+       !============================================================ 
+!!$       call curlDF4(velocity,vorticity,resolution,grid_step)
+!!$       call ppm_map_field_ghost_get(topo%ID,meshNum,domain_ghostsize,info)
+!!$       call ppm_map_field_push(topo%ID,meshNum,vorticity,3,info)
+!!$       call ppm_map_field_send(info)
+!!$       call ppm_map_field_pop(topo%ID,meshNum,vorticity,3,domain_ghostsize,info)
+!!$       
+!!$
+       !============================================================ 
+       ! Compute stretch/diffusion from current velocity/vorticity
+       !============================================================ 
+!!$       call computeRHS(velocity,vorticity,rhs,resolution,grid_step,nu)
+!!$       !!call computeStretch(velocity,vorticity,rhs,resolution,grid_step)
+!!$       call ppm_map_field_ghost_get(topo%ID,meshNum,domain_ghostsize,info)
+!!$       call ppm_map_field_push(topo%ID,meshNum,rhs,3,info)
+!!$       call ppm_map_field_send(info)
+!!$       call ppm_map_field_pop(topo%ID,meshNum,rhs,3,domain_ghostsize,info)
+       
+!!$       print *, 'veloc', sum(velocity(c_X,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(velocity(c_Y,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(velocity(c_Z,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1))
+!!$       print *, 'vort',sum(vorticity(c_X,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(vorticity(c_Y,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(vorticity(c_Z,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1))
+!!$       print *, 'stretch',sum(rhs(c_X,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(rhs(c_Y,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(rhs(c_Z,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1))
+!!$       print *, 'max v...', maxval(velocity(c_X,:,:,:,:)), maxval(velocity(c_Y,:,:,:,:)), maxval(velocity(c_Z,:,:,:,:))
+!!$       print *, 'min v...', minval(velocity(c_X,:,:,:,:)), minval(velocity(c_Y,:,:,:,:)), minval(velocity(c_Z,:,:,:,:))
+!!$       print *, 'max w...', maxval(vorticity(c_X,:,:,:,:)), maxval(vorticity(c_Y,:,:,:,:)), maxval(vorticity(c_Z,:,:,:,:))
+!!$       print *, 'min w...', minval(vorticity(c_X,:,:,:,:)), minval(vorticity(c_Y,:,:,:,:)), minval(vorticity(c_Z,:,:,:,:))
+!!$       print *, 'max s...', maxval(rhs(c_X,:,:,:,:)), maxval(rhs(c_Y,:,:,:,:)), maxval(rhs(c_Z,:,:,:,:))
+!!$       print *, 'min s...', minval(rhs(c_X,:,:,:,:)), minval(rhs(c_Y,:,:,:,:)), minval(rhs(c_Z,:,:,:,:))
+!!$
+!!$
+
+
+
+!!$       print *, 'indices ...', maxloc(vorticity(:,:,:,:,:)),'toto',minloc(vorticity(c_Y,:,:,:,:))
+!!$
+!!$
+!!$       print *, 'vort', vorticity(c_Y,-1,-1,4,1)
+!!$       print *, 'vel', velocity(c_X,1,1,4,1), velocity(c_X,1,1,128,1)
+!!$       print *, 'vel +1', velocity(c_X,1,1,5,1), velocity(c_X,1,1,129,1)
+!!$       print *, 'vel', velocity(c_X,1,1,6,1), velocity(c_X,1,1,130,1)
+!!$       print *, 'vel -1', velocity(c_X,1,1,3,1), velocity(c_X,1,1,127,1)
+!!$       print *, 'vel', velocity(c_X,1,1,2,1), velocity(c_X,1,1,126,1)
+ 
+       
+       ! Update time
+       current_time = current_time + time_step
+       
+       if(verbose) then 
+          !memoryUsed = memoryUsed + getMemoryUsedForParticles()
+          write(*,'(a,i5,a,i10)') "[", rank,"] end of iter ",iter
+          write(*,'(a,i5,a,f10.4)')  "[", rank,"] simulation time : ", MPI_WTime()-elapsed_time
+          write(*,'(a,i5,a,f14.10)')  "[", rank,"] current time step : ", time_step
+          write(*,'(a,i5,a,f10.4)')  "[", rank,"] Memory used : ",  memoryUsed
+          write(*,'(a,i5,a,f14.10)')  "[", rank,"] current time : ", current_time
+       end if
+
+       ! Compute the new time step according to vorticity maximum value
+       !    call updateTimeStep(time_step,vorticity,dtMax)
+       
+       ! Output every 20 time units ...
+       ! if(mod(current_time,20.).lt.time_step) then
+       !    call printToVTK("run",iter,velocity,vorticity,resolution,grid_step,coordMin)
+       ! end if
+       
+       iter = iter+1
+       !call MPI_BARRIER(MPI_COMM_WORLD,info)
+    enddo
+    
+    if(rank==0) close(10)
+
+  end subroutine timeLoop
+
+  subroutine updateTimeStep(dt,ref_field,dtMax)
+    
+    real(mk), dimension(:,:,:,:,:), pointer :: ref_field
+    real(mk), intent(inout) :: dt
+    real(mk),intent(in)::dtMax
+
+    real(mk) :: local_max,omega_max
+    integer :: info
+
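+    ! Stability rule: the time step is bounded by 0.25 over the global maximum of the reference field
+    ! (the vorticity, see the commented call in timeLoop), then capped by dtMax.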
+    local_max = maxval(ref_field(:,:,:,:,nsublist)) ! We ignore the ppm nsubs stuff ...
+    call MPI_ALLReduce(local_max,omega_max,1,MPI_DOUBLE_PRECISION,MPI_MAX,MPI_COMM_WORLD,info)
+    dt = min(0.25/omega_max,dtMax)
+    
+  end subroutine updateTimeStep
+
+
+
+
+end module NavierStokes2D
+
diff --git a/HySoP/src/main/Particles.f90 b/HySoP/src/main/Particles.f90
deleted file mode 100755
index f8b14c394..000000000
--- a/HySoP/src/main/Particles.f90
+++ /dev/null
@@ -1,369 +0,0 @@
-!> Functions dealing with particles :
-!> - initialisation, creation
-!> - remesh and interpolation
-!> - integration (push) 
-module Particles
-  
-  use ppm_module_rmsh, only : ppm_rmsh_create_part,ppm_interp_m2p, ppm_interp_p2m !, ppm_rmsh_remesh
-  use ppm_module_impose_part_bc
-  use client_data
-  use ppm_module_data, only : ppm_param_rmsh_kernel_mp4
-  use ppm_module_map_part
-  use ppm_module_util_dbg
-
-  implicit none
-
-  private
-
-  public init_particles, update_particles, push_particles, remesh,getMemoryUsedForParticles
-
-  !> cutoff values (threshold for vorticity values for which particles are created)
-  real(mk), dimension(2) :: cutoff
-  !> Current number of particles
-  integer :: npart
-  !> Particles positions
-  real(mk), dimension(:,:), pointer :: xp=>NULL()
-  !> Particles  strength (ie var carried by part, vorticity indeed)
-  real(mk), dimension(:,:), pointer :: omp=>NULL()
-  !> Particle velocities
-  real(mk), dimension(:,:), pointer :: velo_p=>NULL()
-  !> Particles RHS term
-  real(mk), dimension(:,:), pointer :: rhsp => NULL()
-  !> Backup vector for RK schemes
-  real(mk), dimension(:,:), pointer :: buffer=>NULL(),buffer2=>NULL(),buffer3=>NULL()
-  !> Size of buffer, i.e. more or less number of particles at the previous time step
-  integer :: buffer_size
-  !> Kernel for remesh ...
-  integer :: kernel
-
-contains
-  
-  !> Set required parameters for particles creations
-  subroutine init_particles()
-    
-    ! Cutoff : lower and upper bound for cutoff, i.e. for each value of the ref. field between cutoff(1) and cutoff(2), a particle will be
-    ! created.
-    cutoff(1) = 1.d-6
-    cutoff(2) = 100000
-    kernel = ppm_param_rmsh_kernel_mp4
-    npart = 0
-    buffer_size = npart
-  end subroutine init_particles
-  
-  !> Create particles distribution
-  subroutine update_particles(field_on_grid,resetpos,topoid,meshid,vel)
-    ! The field used to create particles
-    real(mk), dimension(:,:,:,:,:), pointer :: field_on_grid
-    !  true to reset distribution else false
-    logical, intent(in) :: resetpos
-    ! topo and mesh ids
-    integer, intent(in) :: topoid
-    integer, intent(in) :: meshid
-    integer :: info
-    ! velocity on grid
-    real(mk), dimension(:,:,:,:,:), pointer :: vel
-    info = 0
-    
-    ! -------->  The following call will allocate memory for xp, omp and velo_p/
-    ! -------->  And deallocation will be handled by ppm. (?)
-    !> -------->  Other variables carried by particles but not allocated during ppm_rmsh_create call
-    ! -------->  is there a better way to do this, with map_push or other ppm routines? No according to Omar. 
-    ! Call ppm func to create the particles distribution
-    call ppm_rmsh_create_part(topoid,meshid,xp,npart,omp,dime,field_on_grid,cutoff,info,resetpos,&
-         field_wp=vel,wp=velo_p,lda2=dime)
-    write(*,'(a,i5,a,i8)') '[',rank,'] initialisation with ', npart,' particles'
-    
-    !if(associated(xp)) print *, "xp shape:", rank, " ", shape(xp), npart
-    ! -------->  Ok, from here all vars carried by particles must have the following shape : dime,npart
-    !> -------->  mesh to particles for velocity  => done in ppm_rmsh_create_part 
-    !    call ppm_interp_m2p(topoid, meshid, xp, npart, velo_p, dime, kernel, ghostsize, vel,info)
-    !    print *, 'End of update particles'
-
-  end subroutine update_particles
-
-  !> time integration
-  !> Advection of particles, more or less a copy of Adrien's code.
-  subroutine push_particles(dt,topoid,meshid,ghostsize,vel,rhs)
-    !> time step
-    real(mk), intent(in) ::dt
-    ! topo and mesh ids
-    integer, intent(in) :: topoid
-    integer, intent(in) :: meshid
-    integer,  dimension(:),pointer:: ghostsize
-    ! velocity on grid
-    real(mk), dimension(:,:,:,:,:), pointer :: vel
-     ! Secondary field to be mapped to particles
-    real(mk), dimension(:,:,:,:,:), pointer :: rhs
-    
-    ! Compute new particles positions ...
-    ! Integrate ... ===> update omp
-    ! Todo: switch process between the different methods, leaded by a user-defined var? Later ...
-    call runge_kutta_2(dt,topoid,meshid,ghostsize,vel,rhs)
-    !call runge_kutta_4(dt,topoid,meshid,ghostsize,vel,rhs)
-  end subroutine push_particles
-  
-  subroutine remesh(topoid,meshid,ghostsize,field)
-    ! topo and mesh ids
-    integer, intent(in) :: topoid
-    integer, intent(in) :: meshid
-    integer,  dimension(:),pointer :: ghostsize
-    ! vorticity on grid
-    real(mk), dimension(:,:,:,:,:), pointer :: field
-    
-    integer info
-    info = -1
-    !call ppm_dbg_print_d(topoid,ghostlayer,1,1,info,xp,npart)
-    call ppm_interp_p2m(topoid,meshid,xp,npart,omp,dime,kernel,ghostsize,field,info)
-    if(info.ne.0) then
-       stop 'Particles: ppm remesh error '
-    end if
-  end subroutine remesh
-  
-  subroutine free_particles()
-    ! --------> is it required to call some specific routines to clean anything related to particles, at the end of the simulation?
-    ! or ppm_finalize do it all?  
-    ! According to Omar: do not use ppm "internal" routines but clean it yourself. 
-    if(associated(rhsp)) deallocate(rhsp)
-    if(associated(buffer)) deallocate(buffer)
-    if(associated(buffer2)) deallocate(buffer2)
-  end subroutine free_particles
-
-  !> Runge Kutta 2 for positions and 1 for vorticity
-  subroutine runge_kutta_2(dt,topoid,meshid,ghostsize,vel,rhs)
-    !> time step
-    real(mk), intent(in) ::dt
-    ! topo and mesh ids
-    integer, intent(in) :: topoid
-    integer, intent(in) :: meshid
-    integer,  dimension(:),pointer:: ghostsize
-    ! velocity on grid
-    real(mk), dimension(:,:,:,:,:), pointer :: vel
-     ! Secondary field to be mapped to particles
-    real(mk), dimension(:,:,:,:,:), pointer :: rhs
-    
-    !> local loop indices
-    integer :: i,newNpart,k
-    integer :: info
-    ! buffer must be of the same size as xp so if the number of particles has increased in comparison with previous step,
-    ! we reallocate secondary fields. 
-    ! Note Franck : Either we allocate buffer at each time step or we reallocate only when buffer_size increase. 
-    ! Since memory is our main problem, we allocate/deallocate at each time step, for the moment. 
-    !    if(buffer_size.lt.npart) then
-    !      deallocate(buffer) 
-    allocate(buffer(dime,npart))
-    buffer_size = npart
-
-    do i=1,npart
-       do k=1,dime
-          buffer(k,i)=xp(k,i)
-          xp(k,i)=buffer(k,i)+0.5*dt*velo_p(k,i)
-       end do
-    end do
-    ! Particles positions have changed ... we must map between domains
-    !    call ppm_impose_part_bc(topoid,xp,npart,info) ! Useless according to doc, required according to Omar ...
-    newNpart = 0
-    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
-    call ppm_map_part_push(velo_p,dime,npart,info)    ! velocity
-    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
-    call ppm_map_part_push(buffer,dime,npart,info)    
-    call ppm_map_part_send(npart,newNpart,info)          ! send
-    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
-    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
-    call ppm_map_part_pop(velo_p,dime,npart,newNpart,info)  
-    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
-    ! Update sizes ...
-    npart = newNpart
-    !    if(size(rhsp,2).lt.npart) then
-    !      deallocate(rhsp) 
-    allocate(rhsp(dime,npart))
-    !  end if
-    
-    !! Mesh to particles for the new particles positions ...
-    !> for velocity
-    call ppm_interp_m2p(topoid,meshid,xp,npart,velo_p,dime,kernel,ghostsize,vel,info)
-    !> for rhs of vorticity eq.
-    call ppm_interp_m2p(topoid,meshid,xp,npart,rhsp,dime,kernel,ghostsize,rhs,info)
-    ! Update positions according to the new velocity and vorticity with new rhs
-    
-    do i=1,npart
-       do k=1,dime
-          xp(k,i)=buffer(k,i)+dt*velo_p(k,i)
-          omp(k,i)=omp(k,i)+dt*rhsp(k,i)
-       end do
-    end do
-    ! Free memory as soon as possible ...
-    deallocate(buffer,rhsp)
-    ! Vorticity mapping ...
-    newNpart = 0
-    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
-    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
-    call ppm_map_part_send(npart,newNpart,info)          ! send
-    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
-    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
-    npart = newNpart
-    
-  end subroutine runge_kutta_2
-
-  !> Runge Kutta 4
-  subroutine runge_kutta_4(dt,topoid,meshid,ghostsize,vel,rhs)
-    !> time step
-    real(mk), intent(in) ::dt
-    ! topo and mesh ids
-    integer, intent(in) :: topoid
-    integer, intent(in) :: meshid
-    integer,  dimension(:),pointer::ghostsize
-    ! velocity on grid
-    real(mk), dimension(:,:,:,:,:), pointer :: vel
-     ! Secondary field to be mapped to particles
-    real(mk), dimension(:,:,:,:,:), pointer :: rhs
-    
-    !> local loop indices
-    integer :: i, newNpart,k
-    real(mk) :: alpha
-    !> error status
-    integer :: info
-    ! buffer is used to save intermediate values for xp and omp, thus buffersize=2*npart
-
-    allocate(buffer(dime,npart),buffer2(dime,npart),buffer3(dime,npart),rhsp(dime,npart))
-    buffer_size=npart
-    
-    !> Compute current rhs on particles
-    call ppm_interp_m2p(topoid,meshid,xp,npart,rhsp,dime,kernel,ghostsize,rhs,info)
-    ! First RK stage
-    ! Velocity is up to date, following the call to update_particles
-    alpha=0.5*dt
-    do i=1,npart
-       do k=1,dime
-          buffer(k,i)=xp(k,i)
-          xp(k,i)=buffer(k,i)+alpha*velo_p(k,i)
-          buffer2(k,i)=velo_p(k,i)
-          buffer3(k,1)=rhsp(k,i)
-       end do
-    end do
-    ! Particles positions have changed ... we must map between domains
-    !    call ppm_impose_part_bc(topoid,xp,npart,info) ! Useless according to doc, required according to Omar ...
-    newNpart = 0
-    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
-    call ppm_map_part_push(velo_p,dime,npart,info)    ! velocity
-    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
-    call ppm_map_part_push(buffer,dime,npart,info)    
-    call ppm_map_part_push(buffer2,dime,npart,info)    
-    call ppm_map_part_push(buffer3,dime,npart,info)    
-    call ppm_map_part_push(rhsp,dime,npart,info)    
-    call ppm_map_part_send(npart,newNpart,info)          ! send
-    call ppm_map_part_pop(rhsp,dime,npart,newNpart,info)
-    call ppm_map_part_pop(buffer3,dime,npart,newNpart,info)
-    call ppm_map_part_pop(buffer2,dime,npart,newNpart,info)
-    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
-    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
-    call ppm_map_part_pop(velo_p,dime,npart,newNpart,info)  
-    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
-    ! Update sizes ...
-    npart = newNpart
- 
-    !! Mesh to particles for the new particles positions ...
-    !> for velocity and rhs
-    call ppm_interp_m2p(topoid,meshid,xp,npart,velo_p,dime,kernel,ghostsize,vel,info)
-    call ppm_interp_m2p(topoid,meshid,xp,npart,rhsp,dime,kernel,ghostsize,rhs,info)
-
-    !! Second RK4 stage, with the updated velocity
-    do i=1,npart
-       do k=1,dime
-          xp(k,i)=buffer(k,i)+alpha*velo_p(k,i)
-          buffer2(k,i)=buffer2(k,i)+2.*velo_p(k,i)
-          buffer3(k,i)=buffer3(k,i)+2.*rhsp(k,i)
-       end do
-    end do
-        newNpart = 0
-    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
-    call ppm_map_part_push(velo_p,dime,npart,info)    ! velocity
-    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
-    call ppm_map_part_push(buffer,dime,npart,info)    
-    call ppm_map_part_push(buffer2,dime,npart,info)    
-    call ppm_map_part_push(buffer3,dime,npart,info)    
-    call ppm_map_part_push(rhsp,dime,npart,info)    
-    call ppm_map_part_send(npart,newNpart,info)          ! send
-    call ppm_map_part_pop(rhsp,dime,npart,newNpart,info)
-    call ppm_map_part_pop(buffer3,dime,npart,newNpart,info)
-    call ppm_map_part_pop(buffer2,dime,npart,newNpart,info)
-    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
-    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
-    call ppm_map_part_pop(velo_p,dime,npart,newNpart,info)  
-    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
-    ! Update sizes ...
-    npart = newNpart
-
-    !! Mesh to particles for the new particles positions ...
-    !> for velocity and rhs
-    call ppm_interp_m2p(topoid,meshid,xp,npart,velo_p,dime,kernel,ghostsize,vel,info)
-    call ppm_interp_m2p(topoid,meshid,xp,npart,rhsp,dime,kernel,ghostsize,rhs,info)
-
-    !! Third RK4 stage
-    alpha=dt
-    do i=1,npart
-       do k=1,dime
-          xp(k,i)=buffer(k,i)+alpha*velo_p(k,i)
-          buffer2(k,i)=buffer2(k,i)+2.*velo_p(k,i)
-          buffer3(k,i)=buffer3(k,i)+2.*rhsp(k,i)
-       end do
-    end do
-    newNpart = 0
-    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
-    call ppm_map_part_push(velo_p,dime,npart,info)    ! velocity
-    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
-    call ppm_map_part_push(buffer,dime,npart,info)    
-    call ppm_map_part_push(buffer2,dime,npart,info)    
-    call ppm_map_part_push(buffer3,dime,npart,info)    
-    call ppm_map_part_push(rhsp,dime,npart,info)    
-    call ppm_map_part_send(npart,newNpart,info)          ! send
-    call ppm_map_part_pop(rhsp,dime,npart,newNpart,info)
-    call ppm_map_part_pop(buffer3,dime,npart,newNpart,info)
-    call ppm_map_part_pop(buffer2,dime,npart,newNpart,info)
-    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
-    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
-    call ppm_map_part_pop(velo_p,dime,npart,newNpart,info)  
-    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
-    ! Update sizes ...
-    npart = newNpart
-
-    !! Mesh to particles for the new particles positions ...
-    !> for velocity and rhs
-    call ppm_interp_m2p(topoid,meshid,xp,npart,velo_p,dime,kernel,ghostsize,vel,info)
-    call ppm_interp_m2p(topoid,meshid,xp,npart,rhsp,dime,kernel,ghostsize,rhs,info)
-    
-    !! Last RK4 stage
-    alpha=dt/6.
-    do i=1,npart
-       do k=1,dime
-          xp(k,i)=buffer(k,i)+alpha*buffer2(k,i)+alpha*velo_p(k,i)
-          omp(k,i)=omp(k,i)+alpha*buffer3(k,i)+alpha*rhsp(k,i)
-       end do
-    end do
-    
-    ! Free memory as soon as possible ...
-    deallocate(buffer,buffer2,buffer3,rhsp)
-    ! Vorticity mapping ...
-    newNpart = 0
-    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
-    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
-    call ppm_map_part_send(npart,newNpart,info)          ! send
-    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
-    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
-    npart = newNpart
-    
-  end subroutine runge_kutta_4
-
-  !> Return the memory used to save var. attached to particles
-  function getMemoryUsedForParticles()
-    real(mk) :: getMemoryUsedForParticles
-    
-    getMemoryUsedForParticles = sizeof(xp)+sizeof(velo_p)+sizeof(rhsp)+sizeof(buffer)+&
-         sizeof(buffer2)+sizeof(buffer3)
-    getMemoryUsedForParticles = getMemoryUsedForParticles*1e-6
-    if(verbose) then
-       write(*,'(a,i3,a,f10.4,a)') &
-            '[',rank,'] memory used for particles:', getMemoryUsedForParticles, ' MB.'
-    end if
-
-  end function getMemoryUsedForParticles
-end module Particles
diff --git a/HySoP/src/main/client_data.f90 b/HySoP/src/main/client_data.f90
deleted file mode 100755
index 0d68f62bb..000000000
--- a/HySoP/src/main/client_data.f90
+++ /dev/null
@@ -1,14 +0,0 @@
-!> Some global parameters and variables
-module client_data
-
-  implicit none 
-  
-  integer, parameter :: mk = kind(1.0d0) ! double precision  
-  integer, parameter :: dime = 3 ! Problem dimension
-  real(mk), parameter :: pi = 4.0*atan(1.0_mk)
-  integer :: rank ! current mpi-processus rank
-  integer :: nbprocs ! total number of mpi processus
-  integer,parameter :: c_X=1,c_Y=2,c_Z=3 ! trick to identify coordinates in a more user-friendly way 
-  logical,parameter :: verbose = .True. ! to activate (or not) screen output
-
-end module client_data
diff --git a/HySoP/src/main/main.cxx b/HySoP/src/main/main.cxx
index 0d9fa43a2..504c4e63e 100644
--- a/HySoP/src/main/main.cxx
+++ b/HySoP/src/main/main.cxx
@@ -19,27 +19,10 @@ using Parmes::Def::real_t;
 extern "C" void createTopoG(int*, int*, int*, double*, double*, int*, double*);
 extern "C" void plouhmans();
 extern "C" void NavierStokes3D();
+extern "C" void NavierStokes2D();
+extern "C" void testPoisson2D();
 extern "C" void testMain();
-
-void testPlouhmans()
-{
-#ifdef USE_MPI
-  MPI::Init();
-  assert(MPI::Is_initialized());
-#endif
-  NavierStokes3D();
-  //testMain();
-#ifdef USE_MPI
-  MPI::Finalize();
-#endif
-}
-
-//void test0()
-// {
-//   double t0; 
-//   int info;
-  
-
+//!extern "C" void Scalar3D();
 
 //   // =====  Physical domain definition =====
 //   // Problem dimension
@@ -98,6 +81,18 @@ void testPlouhmans()
 
 int main(int argc, char* argv[])
 {
-  testPlouhmans();
+#ifdef USE_MPI
+  MPI::Init();
+  assert(MPI::Is_initialized());
+#endif
+//  NavierStokes3D();
+  std::cout << "C++ calls Fortran ..." << std::endl;
+  testPoisson2D();
+  //NavierStokes2D();
+  //Scalar3D();
+  //testMain();
+#ifdef USE_MPI
+  MPI::Finalize();
+#endif
 }
 
diff --git a/HySoP/src/main/parmesTools.f90 b/HySoP/src/parmesTools.f90
similarity index 100%
rename from HySoP/src/main/parmesTools.f90
rename to HySoP/src/parmesTools.f90
diff --git a/HySoP/src/poisson/Poisson.f90 b/HySoP/src/poisson/Poisson.f90
new file mode 100755
index 000000000..45f960e08
--- /dev/null
+++ b/HySoP/src/poisson/Poisson.f90
@@ -0,0 +1,301 @@
+!> Poisson solvers
+!! Solvers for:
+!! \f{eqnarray*} \Delta \psi &=& - \omega \\ v &=& \nabla \times \psi \f}
+module poisson
+
+  use client_data
+    
+  use client_topology, only : createTopologyY,getPPMLocalResolution,topoY
+
+  use ppm_module_map
+  
+  use Domain
+
+  implicit none
+
+  include "fftw3-mpi.f03"
+
+  !> plan for fftw "r2c" transform
+  type(C_PTR) :: planr2c
+  !> plan for fftw "c2r" transform
+  type(C_PTR) :: planc2r
+  !> GLOBAL number of points in each dir, for fftw
+  integer(C_INTPTR_T),dimension(dime) :: fft_resolution
+  !> LOCAL resolution in the direction of distribution
+  integer(c_INTPTR_T) :: local_ny
+  !> Offset in the direction of distribution
+  integer(c_INTPTR_T) :: local_j_offset
+  !> Field (real values) for fftw input
+  real(C_DOUBLE), pointer :: rin(:,:,:),rinY(:,:)
+  !> Field (complex values) for fftw output
+  complex(C_DOUBLE_COMPLEX), pointer :: cout(:,:),coutY(:,:)
+  !> Pointer to rin/cout used to enforce memory alignment for fftw. For details, see
+  !! http://www.fftw.org/fftw3_doc/FFTW-MPI-Fortran-Interface.html#FFTW-MPI-Fortran-Interface
+  type(C_PTR) :: p,pY
+  !> Wavenumber scale factors (2*pi/length in each direction)
+  real(mk),dimension(dime) :: coefFFT
+  !> id of the fftw topology
+  integer :: topoFFTid
+  !> id of the fftw mesh
+  integer :: meshFFTid
+contains
+
+  !> Prepare fftw plans for solvers based on fft.
+  !! This routine is based on fftw-mpi and requires f2003 C-fortran capabilities.
+  subroutine initFFTW2D(resolution,lengths)
+    
+    !> GLOBAL resolution
+    integer, dimension(:), pointer :: resolution
+    !> Length of the domain
+    real(mk),dimension(dim3),intent(in) :: lengths
+
+    integer(C_INTPTR_T) :: i,j,alloc_local,nx2,nz
+    
+    !! init fftw-mpi context
+    call fftw_mpi_init()
+    
+    !! Set fft resolution (global)
+    !! We cannot use 'resolution' directly in the fftw_mpi routines because the C_INTPTR_T kind is required.
+    !! Note also that the boundary values must be set explicitly to enforce periodicity
+    !! (the last point is not computed by fftw).
+    fft_resolution = resolution(1:dime)-1
+    
+    !! === Memory allocations === 
+    ! Get local sizes (the data are distributed according to the last dim)
+    nx2 = fft_resolution(c_X)/2+1
+    alloc_local = fftw_mpi_local_size_2d(fft_resolution(c_Y),nx2,MPI_COMM_WORLD,local_ny,local_j_offset)
+    nz = 1 ! "false" dim, required to fit with ppm-like arrays rank
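+    ! Note: rin/cout (and rinY/coutY) alias the same fftw allocations; the real arrays are padded to
+    ! 2*(nx/2+1) in their first dimension, as required by the fftw-mpi r2c/c2r transforms.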
+    p = fftw_alloc_complex(alloc_local)
+    call c_f_pointer(p, rin, [2*nx2,local_ny,nz])
+    call c_f_pointer(p, cout, [nx2,local_ny])
+    pY = fftw_alloc_complex(alloc_local)
+    call c_f_pointer(pY, rinY, [2*nx2,local_ny])
+    call c_f_pointer(pY, coutY, [nx2,local_ny])
+
+    !! === Create plans ===
+    !! Warnings :
+    !! - we use FFTW_MEASURE, which means that rin may be overwritten during plan creation; it must
+    !! be initialized AFTER these calls.
+    !! - dimensions are given in reverse order in plan creation (Fortran-C interface)
+   
+    !! plan for fftw "r2c" transform
+    !planr2c = fftw_mpi_plan_dft_r2c_2d(fft_resolution(c_Y),fft_resolution(c_X),rin,cout,&
+    !    MPI_COMM_WORLD,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_OUT))
+    planr2c = fftw_mpi_plan_dft_r2c_2d(fft_resolution(c_Y),fft_resolution(c_X),rin,cout,MPI_COMM_WORLD,FFTW_MEASURE)
+
+    !! plan for fftw "c2r" transform
+    !planc2r = fftw_mpi_plan_dft_c2r_2d(fft_resolution(c_Y),fft_resolution(c_X),cout,rin,&
+    !     MPI_COMM_WORLD,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_IN))
+    planc2r = fftw_mpi_plan_dft_c2r_2d(fft_resolution(c_Y),fft_resolution(c_X),cout,rin,MPI_COMM_WORLD,FFTW_MEASURE)
+
+    !> wavenumber scale factors: 2*pi/L in each direction
+    coefFFT = 2.*pi/lengths(1:dime)
+
+    !> Create a fftw-dedicated topology 
+    call createTopologyY(topoFFTid,meshFFTid,physDomainLowerPoint,physDomainUpperPoint,domain_bc,resolution)
+    
+  end subroutine initFFTW2D
+     
+
+  !> Delete all the fftw stuff (plans, Cpointers ...)
+  subroutine cleanFFTW()
+    
+    call fftw_destroy_plan(planr2c)
+    call fftw_destroy_plan(planc2r)
+    call fftw_free(p)
+    call fftw_free(pY)
+    call fftw_mpi_cleanup()
+    
+  end subroutine cleanFFTW
+
+  
+  subroutine poissonFilter()
+    
+    integer(C_INTPTR_T) :: i,j,nx,ny,indy,nny
+!    real(C_DOUBLE) :: normK
+    complex(C_DOUBLE_COMPLEX) :: normK
+
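+    ! Spectral filter: with cout/coutY holding the Fourier transform of the vorticity on entry (see
+    ! solvePoisson), the stream function is psi_hat = omega_hat/(kx^2+ky^2) and the velocity follows as
+    ! u_hat = i*ky*psi_hat, v_hat = -i*kx*psi_hat; the singular mode (kx,ky)=(0,0) is set to zero.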
+    normK = 0.0
+    open(52,file="nK")
+    print *, "start poisson filter ...", local_ny,fft_resolution(c_X)
+    nx = fft_resolution(c_X)/2+1
+    ny = fft_resolution(c_Y)/2+1
+    print *, "nx", nx
+
+    nny = min(ny,local_ny)
+
+    print *, "iauuauazhuzah", shape(cout),shape(coutY),local_ny,ny,local_j_offset
+
+    do j = 1,nny
+       do i = 1,nx
+          if (j==1.and.i==1) then
+             normK = 0.0
+          else 
+             normK = Icmplx/((coefFFT(c_X)*(i-1))**2 + (coefFFT(c_Y)*(j-1))**2)
+          end if
+          cout(i,j) = cout(i,j)*normK*coefFFT(c_Y)*(j-1)
+          coutY(i,j) = -coutY(i,j)*normK*coefFFT(c_X)*(i-1)
+          write(52,'(i5,i5,2e14.5)') i-1,j-1,normK
+       end do
+    end do
+
+    print *, "loop 2 ;;.",nny+1,local_ny,nx
+
+    do j = nny+1,local_ny
+       do i = 1,nx
+          indy=j-1-fft_resolution(c_Y)
+          normK = Icmplx/((coefFFT(c_X)*(i-1))**2 + (coefFFT(c_Y)*indy)**2)
+          cout(i,j) = cout(i,j)*normK*coefFFT(c_Y)*indy
+          coutY(i,j) = -coutY(i,j)*normK*coefFFT(c_X)*(i-1)
+          write(52,'(i5,i5,2e14.5)') i-1,indy,normK
+       end do
+    end do
+
+    close(52)
+  end subroutine poissonFilter
+
+  subroutine diffusionFilter(fieldIn,fieldOut,nu,dt)
+
+    !> Input field (on grid)
+    real(mk),dimension(:,:,:,:),pointer :: fieldIn
+    !> Output field (on grid)
+    real(mk),dimension(:,:,:,:),pointer :: fieldOut
+    !> viscosity
+    real(mk),intent(in)::nu
+    !> Time step
+    real(mk),intent(in)::dt
+
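+    ! NOTE: not implemented yet; this spectral diffusion filter is left as a placeholder.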
+
+
+    
+  end subroutine diffusionFilter
+  
+  subroutine solvePoisson(fieldIn,fieldOut,resolution,topoid,meshid)
+    
+    real(mk),dimension(:,:,:),pointer :: fieldIn
+    real(mk),dimension(:,:,:,:),pointer :: fieldOut
+    
+    !> ID of the 3D cartesian topology
+    integer, intent(in) :: topoid
+    !> Id of the initial mesh
+    integer, intent(in) :: meshid
+
+    integer, dimension(dim3),intent(in) :: resolution
+
+    integer(C_INTPTR_T) :: i, j
+    
+    ! transfer data from fieldIn on initial topology to rin on fft topology
+    call mapToFFT(fieldIn,rin,topoid,meshid)
+    
+    ! compute transform (as many times as desired)
+    call fftw_execute_dft_r2c(planr2c,rin,cout)
+    coutY(:,:) = cout(:,:)
+    
+    print *, "fft resol ...", size(cout,1)
+!!$    open(67,file="r2c")
+!!$    do i = 1,size(cout,1)
+!!$       write(67,'(128e14.5)') cout(i,:)
+!!$    end do
+!!$    close(67)
+!!$    
+    !!  filter ...
+    call poissonFilter()
+
+    print *, "end of filter ..."
+    cout = cout/product(fft_resolution)
+    coutY = coutY/product(fft_resolution)
+    
+    !! fftw backward
+    call fftw_execute_dft_c2r(planc2r,cout,rin)
+
+    call fftw_execute_dft_c2r(planc2r,coutY,rinY)
+    
+    ! transfer data back to the initial topology
+    call mapFromFFT(rin,rinY,fieldOut,topoid,resolution)
+
+  end subroutine solvePoisson
+  
+  !> Map data (fieldIn) from the current (cartesian) topology to the fft-dedicated topology
+  subroutine mapToFFT(fieldIn,fftField,fromtopo,frommesh)
+    
+    !> Original field (on topoid)
+    real(mk),dimension(:,:,:),pointer :: fieldIn
+    !> Resulting field (on topo fft)
+    real(C_DOUBLE), pointer :: fftField(:,:,:)
+    !> Id of the 3D cartesian topo and mesh
+    integer, intent(in) :: fromtopo
+    !> Id of the initial mesh
+    integer, intent(in) :: frommesh
+    
+    integer(C_INTPTR_T) :: i,j
+
+    integer :: info
+
+    integer,dimension(dime), parameter :: zeros = (/0,0/)
+
+    integer,dimension(dim3) :: resolTmp
+
+
+    resolTmp = getPPMLocalResolution(topoY,meshFFTid)
+
+!!    fftField = 0.0
+!!$    ! Input field 
+!!$    do j = 1, local_ny
+!!$       do i = 1, fft_resolution(c_X)
+!!$          fftField(i,j,1) = fieldIn(i,j,1)
+!!$       end do
+!!$    end do
+    
+!!$    allocate(tmp(1:resolTmp(c_X),1:resolTmp(c_Y),1))
+!!$    tmp = 0.0
+
+    print *, rank, "test sizes ...",shape(fftField),"///",shape(fieldIn)
+    
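+    ! Global mapping sequence: define the mapping between the two topologies/meshes, push the field,
+    ! send it, then pop it on the fft topology.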
+    call ppm_map_field_global(fromtopo,topoFFTid,frommesh,meshFFTid,info)
+    call ppm_map_field_push(fromtopo,frommesh,fieldIn,info)
+    call ppm_map_field_send(info)
+    call ppm_map_field_pop(topoFFTid,meshFFTid,fftField,zeros,info)
+!!$    do j = 1,grid_resolution(c_Y)
+!!$       print *, rank,j, 'f in', fieldIn(:,j,1)
+!!$    end do
+!!$    
+!!$    do j = 1,resolTmp(c_Y)
+!!$       print *, rank,j, 'tmp tmp', fftField(:,j,1)
+!!$    end do
+!!$    
+!!$    open(68,file="rin")
+!!$    do i = 1,fft_resolution(c_X)
+!!$       write(68,'(64e14.5)') fftField(i,1:fft_resolution(c_Y))
+!!$    end do
+!!$    close(68)
+!!$
+
+  end subroutine mapToFFT
+
+  !> Map data (output) from the fft topology back to the (cartesian) topology
+  subroutine mapFromFFT(fftFieldX,fftFieldY,fieldOut,totopo,resolution)
+    
+    !> Output fields, on fft topology
+    real(C_DOUBLE), pointer :: fftFieldX(:,:,:),fftFieldY(:,:)
+    !> Output field on original topology (totopo)
+    real(mk),dimension(:,:,:,:),pointer :: fieldOut
+    !> Global resolution
+    integer,dimension(dim3),intent(in) :: resolution
+    !> Id of the 3D cartesian topo
+    integer, intent(in) :: totopo
+
+    integer(C_INTPTR_T) :: i,j
+    
+    do j = 1, local_ny
+       do i = 1, fft_resolution(c_X)
+          fieldOut(c_X,i,j,1) = fftFieldX(i,j,1)
+          fieldOut(c_Y,i,j,1) = fftFieldY(i,j)
+       end do
+       ! Apply periodic BC
+       fieldOut(:,resolution(c_X),j,1) = fieldOut(:,1,j,1)
+    end do
+    fieldOut(:,:,resolution(c_Y),1) = fieldOut(:,:,1,1)
+
+  end subroutine mapFromFFT
+
+end module poisson
diff --git a/HySoP/src/main/Solver.f90 b/HySoP/src/poisson/Solver.f90
similarity index 52%
rename from HySoP/src/main/Solver.f90
rename to HySoP/src/poisson/Solver.f90
index a87831eaa..2b9c3a941 100755
--- a/HySoP/src/main/Solver.f90
+++ b/HySoP/src/poisson/Solver.f90
@@ -1,15 +1,23 @@
 !> Poisson Solver on the grid
+!
+! Notes (Franck) concerning the multigrid solver:
+! - the old (before Jan 2012) version of this solver was buggy in ppm. The Parmes routines calling this solver
+! are kept in the present file for future debugging, but they are not (really not ...) usable.
+
 module Solver
 
   use client_data, only: mk, dime,rank
-  use ppm_module_mg_init
   use ppm_module_numerics_data, only: ppm_param_eq_poisson, ppm_param_smooth_rbsor
-  use ppm_module_mg_solv
   use ppm_module_poisson, only: ppm_poisson_init, ppm_poisson_plan, ppm_poisson_grn_pois_per, ppm_poisson_solve, &
        ppm_poisson_drv_curl_fd2, ppm_poisson_drv_curl_fd4, ppm_poisson_drv_none
   use client_topology
   use ppm_module_typedef, only : ppm_param_mesh_coarsen
   use ppm_module_mesh_derive, only: ppm_mesh_derive
+!  use ppmljk_poisson
+  
+  !! All multigrid stuff ...
+  !!use ppm_module_mg_init
+  !!use ppm_module_mg_solv
 
   implicit none
 
@@ -31,63 +39,63 @@ module Solver
 contains
   
   !> Do not work, bug in ppm numerics ...
-  subroutine init_multigrid(topoid, meshid, ghostsize, bc)
-    
-    integer, intent(in) :: topoid
-    integer, intent(in) :: meshid
-    integer, dimension(:), pointer :: ghostsize
-    integer, dimension(:), pointer :: bc
-
-    integer,  dimension(dime,2*dime):: ibcdef
-    real(mk), dimension(dime,1,1,1,1):: ibcvalue
-
-    logical :: with_w_cycles, dump_info
-    real(mk) :: omega
-    integer :: limlev ! Number of mg levels
-    integer :: info
-
-    info = -1
-    limlev = 2 ! Warning, limlev must cope with grid resolution ...
-    omega = 1.15_mk ! Relaxation param
-    with_w_cycles = .FALSE.
-    dump_info = .TRUE.
-    ibcvalue(:,:,:,:,:)=0.0_mk
-    ibcdef(1,:)=bc(:)
-    ibcdef(2,:)=bc(:)
-    ibcdef(3,:)=bc(:)
-    ! Initialize the solver
-    !! ppm_param_eq_poisson : equation type. Poisson is the only possible type ...
-    !! ppm_param_smooth_rbsor : Gauss Seidel, only possible type
-    ! Anyway, at the time the two parameters above are not used in ppm routine. 
-    !! maxlev : number of levels in multigrid
-    !! 
-    
-    call ppm_mg_init(topoid, ppm_param_eq_poisson,ghostsize, ppm_param_smooth_rbsor,dime,ibcdef,&
-         ibcvalue,meshid,limlev,with_w_cycles,dump_info,omega,info)
-   
-    if(info.ne.0) stop 'Init_multigrid failed ...'
-    
-  end subroutine init_multigrid
-
-  !> Do not work
-  subroutine solve_poisson_multigrid(topoid, field, rhs)
-    integer, intent(in) :: topoid
-    real(mk), dimension(:,:,:,:,:), pointer :: field
-    real(mk), dimension(:,:,:,:,:), pointer :: rhs
-
-    integer :: itera, iterf, iter1, iter2, info
-    real(mk) :: Eu
-    
-    itera = 3
-    iterf = 3
-    iter1 = 10
-    iter2 = 4
-    print *, 'solve ...', topoid
-
-    ! Bug inside ...
-    call ppm_mg_solv(topoid, field, rhs, dime, itera, iterf, iter1, iter2, Eu, info)
-
-  end subroutine solve_poisson_multigrid
+!!$  subroutine init_multigrid(topoid, meshid, ghostsize, bc)
+!!$    
+!!$    integer, intent(in) :: topoid
+!!$    integer, intent(in) :: meshid
+!!$    integer, dimension(:), pointer :: ghostsize
+!!$    integer, dimension(:), pointer :: bc
+!!$
+!!$    integer,  dimension(dime,2*dime):: ibcdef
+!!$    real(mk), dimension(dime,1,1,1,1):: ibcvalue
+!!$
+!!$    logical :: with_w_cycles, dump_info
+!!$    real(mk) :: omega
+!!$    integer :: limlev ! Number of mg levels
+!!$    integer :: info
+!!$
+!!$    info = -1
+!!$    limlev = 2 ! Warning, limlev must cope with grid resolution ...
+!!$    omega = 1.15_mk ! Relaxation param
+!!$    with_w_cycles = .FALSE.
+!!$    dump_info = .TRUE.
+!!$    ibcvalue(:,:,:,:,:)=0.0_mk
+!!$    ibcdef(1,:)=bc(:)
+!!$    ibcdef(2,:)=bc(:)
+!!$    ibcdef(3,:)=bc(:)
+!!$    ! Initialize the solver
+!!$    !! ppm_param_eq_poisson : equation type. Poisson is the only possible type ...
+!!$    !! ppm_param_smooth_rbsor : Gauss Seidel, only possible type
+!!$    ! Anyway, at the time the two parameters above are not used in ppm routine. 
+!!$    !! maxlev : number of levels in multigrid
+!!$    !! 
+!!$    
+!!$    call ppm_mg_init(topoid, ppm_param_eq_poisson,ghostsize, ppm_param_smooth_rbsor,dime,ibcdef,&
+!!$         ibcvalue,meshid,limlev,with_w_cycles,dump_info,omega,info)
+!!$   
+!!$    if(info.ne.0) stop 'Init_multigrid failed ...'
+!!$    
+!!$  end subroutine init_multigrid
+!!$
+!!$  !> Do not work
+!!$  subroutine solve_poisson_multigrid(topoid, field, rhs)
+!!$    integer, intent(in) :: topoid
+!!$    real(mk), dimension(:,:,:,:,:), pointer :: field
+!!$    real(mk), dimension(:,:,:,:,:), pointer :: rhs
+!!$
+!!$    integer :: itera, iterf, iter1, iter2, info
+!!$    real(mk) :: Eu
+!!$    
+!!$    itera = 3
+!!$    iterf = 3
+!!$    iter1 = 10
+!!$    iter2 = 4
+!!$    print *, 'solve ...', topoid
+!!$
+!!$    ! Bug inside ...
+!!$    call ppm_mg_solv(topoid, field, rhs, dime, itera, iterf, iter1, iter2, Eu, info)
+!!$
+!!$  end subroutine solve_poisson_multigrid
 
   !> Init fftw through ppm routines
   subroutine init_fftw(fieldin,fieldout,topoid,meshid,deriveValue)
@@ -101,15 +109,21 @@ contains
     
     ! Flag to select built-in Green functions (...)
     integer :: green
-
+    integer :: der
+    
+    if (present(deriveValue)) then
+       der = deriveValue
+    else
+       der = ppm_poisson_drv_curl_fd4
+    end if
+    
     green = ppm_poisson_grn_pois_per ! periodic boundaries
     info = -1
-    
-    !deriveValue = ppm_poisson_drv_curl_fd2
     allocate(fftwplanForppm)
     ! Call ppm routine to initialize fftw plan. 
-    call ppm_poisson_init(topoid,meshid,fftwplanForppm,fieldin,fieldout,green,info,derive=deriveValue)
-
+    call ppm_poisson_init(topoid,meshid,fftwplanForppm,fieldin,fieldout,green,info,derive=der)
+    !call mypoisson_init(topoid,meshid,fftwplanForppm,fieldin,fieldout,info)
+    
     if(info.NE.0) stop 'PPM Poisson solver init failed.'
   end subroutine init_fftw
 
@@ -128,6 +142,7 @@ contains
     
     ! Solves laplacian(fieldout) = - fieldin
     call ppm_poisson_solve(topoid,meshid,fftwplanForppm,fieldin,fieldout,ghostsize,info)
+    !call mypoisson_solve(topoid,meshid,fftwplanForppm,fieldin,fieldout,ghostsize,info)
     if(info.NE.0) stop 'PPM Poisson solver failed.'
     
     ! info = -1
diff --git a/HySoP/src/poisson/poisson_init.f90 b/HySoP/src/poisson/poisson_init.f90
new file mode 100755
index 000000000..42ad26088
--- /dev/null
+++ b/HySoP/src/poisson/poisson_init.f90
@@ -0,0 +1,597 @@
+module ppmljk_poisson
+
+  use client_data
+  use ppm_module_poisson
+  use ppm_module_mktopo
+  use ppm_module_topo_get
+  use ppm_module_mesh_define
+  use ppm_module_map_field
+  use ppm_module_map_field_global
+  use ppm_module_map
+  use mpi
+
+  implicit none
+
+  integer,dimension(dim3), parameter :: zeros = (/0,0,0/)
+
+  !#define __ZEROSI (/0,0,0/)
+
+contains
+
+  !> Initialisation of the Poisson solver (fft), based on ppm and adapted from the
+  !! equivalent file in ppm
+  subroutine mypoisson_init(topoid,meshid,ppmpoisson,fieldin,fieldout,info)
+!!! * ppm_poisson_grn_pois_per - Poisson equation, periodic boundaries
+!!!
+!!! [NOTE]
+!!! fieldin is not preserved by this routine!
+!!! fieldin and fieldout must NOT be the same fields. In-place FFTs have
+!!! not been implemented.
+!!!
+
+    !> the topology id
+    integer, intent(in) :: topoid
+    !> corresponding mesh id
+    integer, intent(in) :: meshid
+    !> fftw plan interface
+    type(ppm_poisson_plan),intent(out) :: ppmpoisson
+    !> input field (i.e. rhs of poisson eq.)
+    !@ strictly speaking fieldin is not being used in the init routine
+    real(mk), dimension(:,:,:,:,:), pointer :: fieldin
+    !> output field
+    real(mk), dimension(:,:,:,:,:), pointer :: fieldout
+    !> error status
+    integer, intent(out) :: info
+
+    !-------------------------------------------------------------------------
+    ! Local variables
+    !-------------------------------------------------------------------------
+    !  real(mk)                            :: t0
+    real(mk),dimension(:,:),pointer     :: xp=>NULL()      !particle positions
+    TYPE(ppm_t_topo),pointer                :: topology=>NULL()
+    TYPE(ppm_t_equi_mesh)                   :: mesh
+    integer ,dimension(dime)               :: indl,indu
+    real(mk),PARAMETER                  :: PI=ACOS(-1.0_mk) !@ use ppm pi
+    real(mk)                            :: normfac
+!!! factor for the Green's function, including FFT normalization
+    integer                                 :: i,j,k
+    integer                                 :: kx,ky,kz
+    integer                                 :: isubl,isub
+    integer,dimension(dime*2)              :: bcdef
+    integer                                 :: assigning
+    integer                                 :: decomposition
+    integer,SAVE                            :: ttopoid
+    integer                                 :: tmeshid
+    real(mk)                            :: Lx2,Ly2,Lz2
+
+    real(mk),dimension(dime)           :: tmpmin,tmpmax
+    integer, dimension(:),pointer           :: maxndataxy=>NULL(),maxndataz=>NULL()
+    integer, dimension(:  ), pointer        :: dummynmxy=>NULL(),dummynmz=>NULL()
+
+    !-------------------------------------------------------------------------
+    ! Initialise routine
+    !-------------------------------------------------------------------------
+    !  CALL substart('ppm_poisson_init',t0,info)
+
+    ppmpoisson%case  = ppm_poisson_grn_pois_per
+
+    !-------------------------------------------------------------------------
+    ! Nullify pointers from the ppmpoisson plans and the fftplans
+    !-------------------------------------------------------------------------
+    NULLIFY(xp)
+    NULLIFY(ppmpoisson%costxy)
+    NULLIFY(ppmpoisson%istartxy)
+    NULLIFY(ppmpoisson%ndataxy)
+    NULLIFY(ppmpoisson%istartxyc)
+    NULLIFY(ppmpoisson%ndataxyc)
+    NULLIFY(ppmpoisson%costz)
+    NULLIFY(ppmpoisson%istartz)
+    NULLIFY(ppmpoisson%ndataz)
+    NULLIFY(ppmpoisson%planfxy%plan)
+    NULLIFY(ppmpoisson%planbxy%plan)
+    NULLIFY(ppmpoisson%planfz%plan)
+    NULLIFY(ppmpoisson%planbz%plan)
+
+    !-------------------------------------------------------------------------
+    ! Get topology and mesh values of input/output
+    !-------------------------------------------------------------------------
+    call ppm_topo_get(topoid,topology,info)
+    mesh  = topology%mesh(meshid)
+
+    !-------------------------------------------------------------------------
+    ! Setup mesh sizes for intermediate meshes/topologies
+    !-------------------------------------------------------------------------
+    !size of real slabs
+    ppmpoisson%nmxy (1) =  mesh%nm(1)
+    ppmpoisson%nmxy (2) =  mesh%nm(2)
+    ppmpoisson%nmxy (3) =  mesh%nm(3)
+    !size of complex slabs
+    ppmpoisson%nmxyc(1) = (mesh%nm(1)-1)/2+1
+    !!ppmpoisson%nmxyc(1) =  mesh%nm(1)
+    ppmpoisson%nmxyc(2) =  mesh%nm(2)
+    ppmpoisson%nmxyc(3) =  mesh%nm(3)
+    !size of complex pencils
+    ppmpoisson%nmz  (1) = (ppmpoisson%nmxyc(1))
+    ppmpoisson%nmz  (2) = (ppmpoisson%nmxyc(2))
+    ppmpoisson%nmz  (3) = (ppmpoisson%nmxyc(3))
+    !size of the fft
+    ppmpoisson%nmfft(1) =  mesh%nm(1)-1
+    ppmpoisson%nmfft(2) =  mesh%nm(2)-1
+    ppmpoisson%nmfft(3) =  mesh%nm(3)-1
+    !Inverse of the size of the domain squared
+    Lx2 = 1.0_mk/(topology%max_physd(1)-topology%min_physd(1))**2
+    Ly2 = 1.0_mk/(topology%max_physd(2)-topology%min_physd(2))**2
+    Lz2 = 1.0_mk/(topology%max_physd(3)-topology%min_physd(3))**2
+
+    !-------------------------------------------------------------------------
+    ! Create temporary derivation arrays if necessary
+    !-------------------------------------------------------------------------
+    ppmpoisson%derivatives = ppm_poisson_drv_curl_sp
+    
+    !-------------------------------------------------------------------------
+    ! Create spectral scaling components always. Just in case some 
+    ! reprojection comes up
+    ! The conditionals need to be for not just the Poisson equation
+    !-------------------------------------------------------------------------
+    ppmpoisson%normkx = &
+         & 2.0_mk*PI/(topology%max_physd(1)-topology%min_physd(1))
+    ppmpoisson%normky = &
+         & 2.0_mk*PI/(topology%max_physd(2)-topology%min_physd(2))
+    ppmpoisson%normkz = &
+         & 2.0_mk*PI/(topology%max_physd(3)-topology%min_physd(3))
+
+    !-------------------------------------------------------------------------
+    ! Create new slab topology
+    !-------------------------------------------------------------------------
+    ttopoid = 0
+    tmeshid = -1
+    decomposition       = ppm_param_decomp_xy_slab
+    assigning           = ppm_param_assign_internal
+    bcdef               = ppm_param_bcdef_periodic
+    tmpmin              = topology%min_physd
+    tmpmax              = topology%max_physd
+
+    CALL ppm_mktopo(ttopoid,tmeshid,xp,0,&
+         & decomposition,assigning,&
+         & tmpmin,tmpmax,bcdef,&
+         & zeros,ppmpoisson%costxy,&
+         & ppmpoisson%nmxy,info)
+
+    ppmpoisson%topoidxy = ttopoid
+    ppmpoisson%meshidxy = tmeshid
+    !-------------------------------------------------------------------------
+    ! Get additional xy-mesh information
+    !-------------------------------------------------------------------------
+    CALL ppm_topo_get_meshinfo(ppmpoisson%topoidxy,ppmpoisson%meshidxy, &
+         & dummynmxy,ppmpoisson%istartxy,ppmpoisson%ndataxy,maxndataxy, &
+         & ppmpoisson%isublistxy,ppmpoisson%nsublistxy,info)
+
+
+    !-------------------------------------------------------------------------
+    ! Create complex slab mesh
+    !-------------------------------------------------------------------------
+    ttopoid = ppmpoisson%topoidxy
+    tmeshid = -1
+    CALL ppm_mesh_define(ttopoid,tmeshid,&
+         & ppmpoisson%nmxyc,ppmpoisson%istartxyc,ppmpoisson%ndataxyc,info)
+    ppmpoisson%meshidxyc = tmeshid
+
+
+    !-------------------------------------------------------------------------
+    ! Create new pencil topology
+    !-------------------------------------------------------------------------
+    ttopoid = 0
+    tmeshid = -1
+    bcdef               = ppm_param_bcdef_periodic
+    assigning       = ppm_param_assign_internal
+    decomposition   = ppm_param_decomp_zpencil
+
+    CALL ppm_mktopo(ttopoid,tmeshid,xp,0,&
+         & decomposition,assigning,&
+         & tmpmin,tmpmax,bcdef,&
+         & zeros,ppmpoisson%costz,&
+         & ppmpoisson%nmz,info)
+
+    ppmpoisson%topoidz = ttopoid
+    ppmpoisson%meshidz = tmeshid
+    !-------------------------------------------------------------------------
+    ! Get additional z-mesh information
+    !-------------------------------------------------------------------------
+    CALL ppm_topo_get_meshinfo(ppmpoisson%topoidz,ppmpoisson%meshidz, &
+         & dummynmz,ppmpoisson%istartz,ppmpoisson%ndataz,maxndataz, &
+         & ppmpoisson%isublistz,ppmpoisson%nsublistz,info)
+
+    !-------------------------------------------------------------------------
+    ! Set and get minimum and maximum indices
+    !-------------------------------------------------------------------------
+    indl(1) = 1
+    indl(2) = 1
+    indl(3) = 1
+
+    !-------------------------------------------------------------------------
+    ! Allocate real xy slabs
+    !-------------------------------------------------------------------------
+    ALLOCATE(ppmpoisson%fldxyr(dime,&
+         & indl(1):maxndataxy(1),indl(2):maxndataxy(2),indl(3):maxndataxy(3),&
+         & 1:ppmpoisson%nsublistxy),stat=info)
+
+
+    !-------------------------------------------------------------------------
+    ! Set and get minimum and maximum indices of COMPLEX xy slabs
+    !-------------------------------------------------------------------------
+    indl(1) = 1
+    indl(2) = 1
+    indl(3) = 1
+    indu(1) = 0
+    indu(2) = 0
+    indu(3) = 0
+    DO isub=1,ppmpoisson%nsublistxy
+       isubl = ppmpoisson%isublistxy(isub)
+       indu(1) = MAX(indu(1),ppmpoisson%ndataxyc(1,isubl))
+       indu(2) = MAX(indu(2),ppmpoisson%ndataxyc(2,isubl))
+       indu(3) = MAX(indu(3),ppmpoisson%ndataxyc(3,isubl))
+    ENDDO
+
+
+    !-------------------------------------------------------------------------
+    ! Allocate complex xy slabs
+    !-------------------------------------------------------------------------
+    ALLOCATE(ppmpoisson%fldxyc(dime,&
+         & indl(1):indu(1),indl(2):indu(2),indl(3):indu(3),&
+         & 1:ppmpoisson%nsublistxy),stat=info)
+
+
+    !-------------------------------------------------------------------------
+    ! Allocate two complex z pencils + Greens fcn array !@check return vars.
+    !-------------------------------------------------------------------------
+    ALLOCATE(ppmpoisson%fldzc1(dime,&
+         & indl(1):maxndataz(1),indl(2):maxndataz(2),indl(3):maxndataz(3),&
+         & 1:ppmpoisson%nsublistz),stat=info)
+
+    ALLOCATE(ppmpoisson%fldzc2(dime,&
+         & indl(1):maxndataz(1),indl(2):maxndataz(2),indl(3):maxndataz(3),&
+         & 1:ppmpoisson%nsublistz),stat=info)
+
+
+    !-------------------------------------------------------------------------
+    ! The complex Greens function is always kept on the z-pencil topology
+    !-------------------------------------------------------------------------
+    ALLOCATE(ppmpoisson%fldgrnr(&
+         & indl(1):maxndataz(1),indl(2):maxndataz(2),indl(3):maxndataz(3),&
+         & 1:ppmpoisson%nsublistz),stat=info)
+
+    !-------------------------------------------------------------------------
+    ! Set up xy FFT plans
+    ! The inverse plan takes the returning topology since it has the full size
+    !-------------------------------------------------------------------------
+    CALL ppm_fft_forward_2d(ppmpoisson%topoidxy,ppmpoisson%meshidxy,&
+         & ppmpoisson%planfxy,ppmpoisson%fldxyr,&
+         & ppmpoisson%fldxyc,info)
+
+    CALL ppm_fft_backward_2d(ppmpoisson%topoidxy,ppmpoisson%meshidxy,&
+         & ppmpoisson%planbxy,ppmpoisson%fldxyc,&
+         & ppmpoisson%fldxyr,info)
+
+
+    !-------------------------------------------------------------------------
+    ! Set up z FFT plans
+    !-------------------------------------------------------------------------
+    CALL ppm_fft_forward_1d(ppmpoisson%topoidz,ppmpoisson%meshidz,&
+         & ppmpoisson%planfz,ppmpoisson%fldzc1,&
+         & ppmpoisson%fldzc2,info)
+
+    CALL ppm_fft_backward_1d(ppmpoisson%topoidz,ppmpoisson%meshidz,&
+         & ppmpoisson%planbz,ppmpoisson%fldzc2,&
+         & ppmpoisson%fldzc1,info)
+
+
+    !-------------------------------------------------------------------------
+    ! Compute Greens function. Analytic, periodic
+    !
+    ! (d2_/dx2 + d2_/dy2 + d2_/dz2)psi = -omega     =>
+    ! -4*pi2(kx2 + ky2 + kz2)PSI       = -OMEGA     =>
+    ! PSI                              = 1/(4*pi2)*1/(kx2 + ky2 + kz2)OMEGA
+    !-------------------------------------------------------------------------
+    ! Scaling of the spectral coefficients:
+    ! one minus sign comes from (i*k)^2 and another from the Poisson equation, so they cancel
+    normfac = 1.0_mk/(4.0_mk*PI*PI * &
+                                !and normalisation of FFTs (full domain) !vertex
+         & real((ppmpoisson%nmfft(1))* &
+         &      (ppmpoisson%nmfft(2))* &
+         &      (ppmpoisson%nmfft(3)),mk))
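+    ! The loop below converts local mesh indices to global wavenumber indices,
+    ! folds indices above nmfft/2 to negative wavenumbers (e.g. for nmfft=8 the
+    ! global index 5 becomes -3) and tabulates the periodic Greens function;
+    ! the singular k=0 mode is set to zero further down.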
+    DO isub=1,ppmpoisson%nsublistz
+       isubl=ppmpoisson%isublistz(isub)
+       DO k=1,ppmpoisson%ndataz(3,isubl)
+          DO j=1,ppmpoisson%ndataz(2,isubl)
+             DO i=1,ppmpoisson%ndataz(1,isubl)
+                kx = i-1 + (ppmpoisson%istartz(1,isubl)-1)
+                ky = j-1 + (ppmpoisson%istartz(2,isubl)-1)
+                kz = k-1 + (ppmpoisson%istartz(3,isubl)-1)
+                !This is a crude way to do it, but it is only done once:
+                IF (kx .GT. (ppmpoisson%nmfft(1)/2)) kx = kx-(ppmpoisson%nmfft(1))
+                IF (ky .GT. (ppmpoisson%nmfft(2)/2)) ky = ky-(ppmpoisson%nmfft(2))
+                IF (kz .GT. (ppmpoisson%nmfft(3)/2)) kz = kz-(ppmpoisson%nmfft(3))
+                ppmpoisson%fldgrnr(i,j,k,isub) = &
+                     & normfac/(real(kx*kx,mk)*Lx2 &
+                     &        + real(ky*ky,mk)*Ly2 &
+                     &        + real(kz*kz,mk)*Lz2)
+                !Take care of the singularity at the zero wavenumber
+                IF ((kx*kx+ky*ky+kz*kz) .EQ. 0) THEN
+                   ppmpoisson%fldgrnr(i,j,k,isub) = 0.0_mk
+                ENDIF
+             ENDDO
+          ENDDO
+       ENDDO
+    ENDDO
+  end subroutine mypoisson_init
+
+  subroutine mypoisson_solve(topoid,meshid,ppmpoisson,fieldin,fieldout,gstw,info)
+
+
+    INTEGER, INTENT(IN)                                         :: topoid
+!!! Topology ID
+    INTEGER, INTENT(IN)                                         :: meshid
+!!! Mesh ID
+    TYPE(ppm_poisson_plan),INTENT(INOUT)                        :: ppmpoisson
+!!! The PPM Poisson plan
+    REAL(mk),DIMENSION(:,:,:,:,:),POINTER                   :: fieldin
+!!! Input data field
+    REAL(mk),DIMENSION(:,:,:,:,:),POINTER                   :: fieldout
+!!! Output data field
+    INTEGER,DIMENSION(dime),INTENT(IN)                         :: gstw
+!!! Ghost layer width
+    INTEGER, INTENT(OUT)                                        :: info
+!!! Return status, 0 on success
+
+    !-------------------------------------------------------------------------
+    ! Local variables
+    !-------------------------------------------------------------------------
+
+    REAL(mk)                      :: t0
+    INTEGER                           :: isub,isubl
+    INTEGER                           :: i,j,k
+    INTEGER                           :: info2
+    INTEGER                           :: presentcase
+    COMPLEX(mk)                   :: divomega
+    INTEGER                           :: gi,gj,gk
+    COMPLEX(mk)                   :: kx,ky,kz
+    COMPLEX(mk)                   :: phix,phiy,phiz
+    REAL(mk)                      :: normfac
+
+    !-------------------------------------------------------------------------
+    ! Check if we run a different/temporary case
+    !-------------------------------------------------------------------------
+
+    presentcase = ppmpoisson%case
+
+    !-----------------------------------------------------------------------
+    ! Map data globally to the slabs (XY)
+    ! This is where the vorticity is extended and padded with 0 for free-space
+    !-----------------------------------------------------------------------
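+    ! All PPM field mappings below follow the same sequence: ppm_map_field_global
+    ! defines the source/target topologies and meshes, push packs the field into
+    ! the send buffer, send performs the communication and pop unpacks the field
+    ! on the target topology.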
+    !Initialise
+    CALL ppm_map_field_global(topoid,ppmpoisson%topoidxy,meshid,ppmpoisson%meshidxy,info)
+    !Push the data
+    CALL ppm_map_field_push(topoid,meshid,fieldin,3,info)
+    CALL ppm_map_field_send(info)
+    !Retrieve
+    CALL ppm_map_field_pop(ppmpoisson%topoidxy,ppmpoisson%meshidxy,ppmpoisson%fldxyr,3,zeros,info)
+
+    !-----------------------------------------------------------------------
+    ! Do slab FFT (XY) - use the xy topology as its extent has not been halved
+    !-----------------------------------------------------------------------
+    CALL ppm_fft_execute_2d(ppmpoisson%topoidxy,&
+         & ppmpoisson%meshidxy, ppmpoisson%planfxy, &
+         & ppmpoisson%fldxyr, ppmpoisson%fldxyc, &
+         & info)
+
+    !-----------------------------------------------------------------------
+    ! Map to the pencils (Z)
+    !-----------------------------------------------------------------------
+    !Initialise
+    CALL ppm_map_field_global(&
+         & ppmpoisson%topoidxy, &
+         & ppmpoisson%topoidz, &
+         & ppmpoisson%meshidxyc, &
+         & ppmpoisson%meshidz,info)
+
+    !Push the data
+    CALL ppm_map_field_push(&
+         & ppmpoisson%topoidxy, &
+         & ppmpoisson%meshidxyc,ppmpoisson%fldxyc,3,info)
+    !Send
+    CALL ppm_map_field_send(info)
+
+    !Retrieve
+    CALL ppm_map_field_pop(&
+         & ppmpoisson%topoidz, &
+         & ppmpoisson%meshidz,ppmpoisson%fldzc1, &
+         & 3,zeros,info)
+
+    !-----------------------------------------------------------------------
+    ! Do pencil FFT (Z)
+    !-----------------------------------------------------------------------
+    CALL ppm_fft_execute_1d(ppmpoisson%topoidz,&
+         & ppmpoisson%meshidz, ppmpoisson%planfz, &
+         & ppmpoisson%fldzc1, ppmpoisson%fldzc2, &
+         & info)
+
+
+    !-----------------------------------------------------------------------
+    ! Apply the periodic Greens function
+    !-----------------------------------------------------------------------
+    DO isub=1,ppmpoisson%nsublistz
+       isubl=ppmpoisson%isublistz(isub)
+       DO k=1,ppmpoisson%ndataz(3,isubl)
+          DO j=1,ppmpoisson%ndataz(2,isubl)
+             DO i=1,ppmpoisson%ndataz(1,isubl)
+                ppmpoisson%fldzc2(1,i,j,k,isub) = ppmpoisson%fldgrnr( i,j,k,isub)*&
+                     & ppmpoisson%fldzc2(1,i,j,k,isub)
+                ppmpoisson%fldzc2(2,i,j,k,isub) = ppmpoisson%fldgrnr( i,j,k,isub)*&
+                     & ppmpoisson%fldzc2(2,i,j,k,isub)
+                ppmpoisson%fldzc2(3,i,j,k,isub) = ppmpoisson%fldgrnr( i,j,k,isub)*&
+                     & ppmpoisson%fldzc2(3,i,j,k,isub)
+             ENDDO
+          ENDDO
+       ENDDO
+    ENDDO
+
+
+    !-----------------------------------------------------------------------
+    ! Spectral derivatives
+    ! normkx, normky, normkz contain 2*pi/Lx, 2*pi/Ly, 2*pi/Lz respectively
+    !-----------------------------------------------------------------------
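+    ! The loop below multiplies the transformed stream function by the purely
+    ! imaginary wavenumbers (i*kx, i*ky, i*kz) and forms the cross product, i.e.
+    ! the curl in spectral space, turning the stream function into the velocity.
+    ! Wavenumber indices above nmfft/2 are folded to negative values as in
+    ! mypoisson_init.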
+    normfac = 1.0_MK/ REAL((ppmpoisson%nmfft(1))* & !vertex
+         & (ppmpoisson%nmfft(2))* &
+         & (ppmpoisson%nmfft(3)),MK)
+    DO isub=1,ppmpoisson%nsublistz
+       isubl=ppmpoisson%isublistz(isub)
+       DO k=1,ppmpoisson%ndataz(3,isubl)
+          gk = k - 1 + (ppmpoisson%istartz(3,isubl)-1)
+          IF (gk .GT. (ppmpoisson%nmfft(3)/2)) gk = gk-(ppmpoisson%nmfft(3))
+          kz = CMPLX(0.0_MK,REAL(gk,MK),MK)*ppmpoisson%normkz
+          DO j=1,ppmpoisson%ndataz(2,isubl)
+             gj = j - 1 + (ppmpoisson%istartz(2,isubl)-1)
+             IF (gj .GT. (ppmpoisson%nmfft(2)/2)) gj = gj-(ppmpoisson%nmfft(2))
+             ky = CMPLX(0.0_MK,REAL(gj,MK),MK)*ppmpoisson%normky
+             DO i=1,ppmpoisson%ndataz(1,isubl)
+                gi = i - 1 + (ppmpoisson%istartz(1,isubl)-1)
+                IF (gi .GT. (ppmpoisson%nmfft(1)/2)) gi = gi-(ppmpoisson%nmfft(1))
+                kx = CMPLX(0.0_MK,REAL(gi,MK),MK)*ppmpoisson%normkx
+                
+                phix = ppmpoisson%fldzc2(1,i,j,k,isub)
+                phiy = ppmpoisson%fldzc2(2,i,j,k,isub)
+                phiz = ppmpoisson%fldzc2(3,i,j,k,isub)
+                
+                ppmpoisson%fldzc2(1,i,j,k,isub) = (ky*phiz-kz*phiy)
+                ppmpoisson%fldzc2(2,i,j,k,isub) = (kz*phix-kx*phiz)
+                ppmpoisson%fldzc2(3,i,j,k,isub) = (kx*phiy-ky*phix)
+             ENDDO
+          ENDDO
+       ENDDO
+    ENDDO
+
+    !-----------------------------------------------------------------------
+    ! IFFT pencil (Z)
+    !-----------------------------------------------------------------------
+    CALL ppm_fft_execute_1d(ppmpoisson%topoidz,&
+         & ppmpoisson%meshidz, ppmpoisson%planbz, &
+         & ppmpoisson%fldzc2, ppmpoisson%fldzc1, &
+         & info)
+
+    !-----------------------------------------------------------------------
+    ! Map back to slabs (XY)
+    !-----------------------------------------------------------------------
+    !Initialise
+    CALL ppm_map_field_global(&
+         & ppmpoisson%topoidz, &
+         & ppmpoisson%topoidxy, &
+         & ppmpoisson%meshidz, &
+         & ppmpoisson%meshidxyc,info)
+    !Push the data
+    CALL ppm_map_field_push(&
+         & ppmpoisson%topoidz, &
+         & ppmpoisson%meshidz,ppmpoisson%fldzc1,3,info)
+
+    !Send
+    CALL ppm_map_field_send(info)
+
+    !Retrieve
+    CALL ppm_map_field_pop(&
+         & ppmpoisson%topoidxy, &
+         & ppmpoisson%meshidxyc,ppmpoisson%fldxyc, &
+         & 3,zeros,info)
+
+    !-----------------------------------------------------------------------
+    ! IFFT (XY) use the non-reduced topology
+    !-----------------------------------------------------------------------
+    CALL ppm_fft_execute_2d(ppmpoisson%topoidxy,&
+         & ppmpoisson%meshidxy, ppmpoisson%planbxy, &
+         & ppmpoisson%fldxyc, ppmpoisson%fldxyr, &
+         & info)
+
+
+    !-----------------------------------------------------------------------
+    ! Map back to standard topology (XYZ)
+    !-----------------------------------------------------------------------
+    !Initialise
+    CALL ppm_map_field_global(&
+         & ppmpoisson%topoidxy, &
+         & topoid, &
+         & ppmpoisson%meshidxy, &
+         & meshid,info)
+    !Push the data
+    CALL ppm_map_field_push(&
+         & ppmpoisson%topoidxy, &
+         & ppmpoisson%meshidxy,ppmpoisson%fldxyr,3,info)
+
+    !Send
+    CALL ppm_map_field_send(info)
+
+    !-------------------------------------------------------------------------
+    ! FINAL RETRIEVE - Here we do different things depending on the task
+    ! i.e. the receiver varies
+    !-------------------------------------------------------------------------
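+    ! If a finite-difference curl has been requested, the field is popped into the
+    ! temporary array drv_vr and ghosted so that the FD stencils can be applied;
+    ! otherwise it is popped directly into fieldout.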
+    IF ((ppmpoisson%derivatives .EQ. ppm_poisson_drv_curl_fd2 .OR. &
+         &  ppmpoisson%derivatives .EQ. ppm_poisson_drv_curl_fd4) ) THEN
+       CALL ppm_map_field_pop(&
+            & topoid, &
+            & meshid,ppmpoisson%drv_vr, &
+            & 3,gstw,info)
+       !-------------------------------------------------------------------------
+       ! Ghost the temporary array for derivatives (drv_vr)
+       !-------------------------------------------------------------------------
+       CALL ppm_map_field_ghost_get(topoid,meshid,gstw,info)
+       CALL ppm_map_field_push(topoid,meshid,ppmpoisson%drv_vr,3,info)
+       CALL ppm_map_field_send(info)
+       CALL ppm_map_field_pop(topoid,meshid,ppmpoisson%drv_vr,3,gstw,info)
+
+    ELSE
+       CALL ppm_map_field_pop(&
+            & topoid, &
+            & meshid,fieldout, &
+            & 3,gstw,info)
+    ENDIF
+
+    !-------------------------------------------------------------------------
+    ! Treat ghost layer to make FD stencils work
+    !-------------------------------------------------------------------------
+    IF (ppmpoisson%derivatives .EQ. ppm_poisson_drv_curl_fd2) THEN
+       CALL ppm_poisson_extrapolateghost(topoid,meshid,ppmpoisson%drv_vr,&
+            & 2,4,gstw,info)
+    ENDIF
+    IF (ppmpoisson%derivatives .EQ. ppm_poisson_drv_curl_fd4 .AND.&
+         & (presentcase .EQ. ppm_poisson_grn_pois_fre)) THEN
+       CALL ppm_poisson_extrapolateghost(topoid,meshid,ppmpoisson%drv_vr,&
+            & 2,4,gstw,info)
+    ENDIF
+
+    !-------------------------------------------------------------------------
+    ! Optionally do derivatives
+    ! Perhaps make ppm_poisson_fd take _none as argument. Then maybe no
+    ! if-statement is required
+    !-------------------------------------------------------------------------
+    IF (presentcase .NE. ppm_poisson_grn_reprojec) THEN
+       IF (ppmpoisson%derivatives .EQ. ppm_poisson_drv_curl_fd2) THEN
+          CALL ppm_poisson_fd(topoid,meshid,ppmpoisson%drv_vr,fieldout,&
+               & ppm_poisson_drv_curl_fd2,info)
+       ENDIF
+       IF (ppmpoisson%derivatives .EQ. ppm_poisson_drv_curl_fd4) THEN
+          CALL ppm_poisson_fd(topoid,meshid,ppmpoisson%drv_vr,fieldout,&
+               & ppm_poisson_drv_curl_fd4,info)
+       ENDIF
+    ENDIF
+
+    !-------------------------------------------------------------------------
+    ! Finally ghost the velocity/stream function field before returning it
+    ! Also extrapolate if freespace
+    !-------------------------------------------------------------------------
+    CALL ppm_map_field_ghost_get(topoid,meshid,gstw,info)
+    CALL ppm_map_field_push(topoid,meshid,fieldout,3,info)
+    CALL ppm_map_field_send(info)
+    CALL ppm_map_field_pop(topoid,meshid,fieldout,3,gstw,info)
+
+  end subroutine mypoisson_solve
+
+
+end module ppmljk_poisson
diff --git a/HySoP/src/ppmInterface/PPMFields.f90 b/HySoP/src/ppmInterface/PPMFields.f90
new file mode 100755
index 000000000..bb2e45920
--- /dev/null
+++ b/HySoP/src/ppmInterface/PPMFields.f90
@@ -0,0 +1,80 @@
+!> Declaration, allocation of all the fields on the grid.
+module PPMFields
+  
+  use client_data
+  use client_topology, only: nsublist
+
+  implicit none
+
+  !> Velocity 3D (PPM-style storage) 
+  real(mk), dimension(:,:,:,:,:), pointer :: PPMvelocity3D => NULL()
+  !> Velocity 2D (PPM-style storage)
+  real(mk), dimension(:,:,:,:), pointer :: PPMvelocity2D => NULL()
+  !> Vorticity 3D (PPM-style storage) 
+  real(mk), dimension(:,:,:,:,:), pointer :: PPMvorticity3D => NULL()
+  !> Vorticity 2D (PPM-style storage)
+  real(mk), dimension(:,:,:), pointer :: PPMvorticity2D => NULL()
+  !> RHS 3D (PPM-style storage) 
+  real(mk), dimension(:,:,:,:,:), pointer :: PPMrhs3D => NULL()
+  !> A Scalar on a 3D grid (PPM-style storage) 
+  real(mk), dimension(:,:,:,:), pointer :: PPMscalar3D => NULL()
+  !> A Scalar on a 2D grid (PPM-style storage) 
+  real(mk), dimension(:,:,:), pointer :: PPMscalar2D => NULL()
+  
+contains
+  
+  !> Fields allocation. 
+  !! Warning : ghostpoints must be included in field (i.e. size = "real size" + 2*number of ghostpoints)
+  subroutine initPPMFields3D(resolution,ghostsize)
+    
+    !> Required resolution for the fields (without ghosts)
+    integer, dimension(dime), intent(in) :: resolution
+    !> number of ghost points in each direction (ghost(c_X) = 2 means resolution(c_X)+ 4 points for the field)
+    integer, dimension(:),pointer:: ghostsize
+    
+    integer::istat
+    ! Lower and upper bounds for fields
+    integer, dimension(dime) :: ldl, ldu
+    ! nsublist from ppm topo. Assumed to be equal to 1 see Topology.f90
+    
+    ldl = 1 - ghostsize
+    ldu = resolution + ghostsize
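+    ! Example: with resolution = 65 and ghostsize = 2, each direction is allocated
+    ! over -1:67; indices 1:65 are the physical points, the rest are ghost layers.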
+    ! Velocity ...
+    allocate(PPMvelocity3D(dime,ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3),nsublist),stat = istat)
+    if(istat.ne.0) stop 'Field allocation error for velocity (PPM)'
+    ! Vorticity
+    allocate(PPMvorticity3D(dime,ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3),nsublist), stat = istat)
+    if(istat.ne.0) stop 'Field allocation error for vorticity (PPM)'
+    ! rhs
+    allocate(PPMrhs3D(dime,ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3),nsublist), stat = istat)
+    if(istat.ne.0) stop 'Field allocation error for rhs (PPM)'
+
+  end subroutine initPPMFields3D
+
+  subroutine initPPMFields2D(resolution,ghostsize)
+    
+    !> Required resolution for the fields (without ghosts)
+    integer, dimension(dime), intent(in) :: resolution
+    !> number of ghost points in each direction (ghost(c_X) = 2 means resolution(c_X)+ 4 points for the field)
+    integer, dimension(:),pointer:: ghostsize
+    
+    integer::istat
+    ! Lower and upper bounds for fields
+    integer, dimension(dime) :: ldl, ldu
+    ! nsublist from ppm topo. Assumed to be equal to 1 see Topology.f90
+    
+    ldl = 1 - ghostsize
+    ldu = resolution + ghostsize
+    ! Velocity ...
+    allocate(PPMvelocity2D(dime,ldl(1):ldu(1),ldl(2):ldu(2),1),stat = istat)
+    if(istat.ne.0) stop 'Field allocation error for velocity (PPM)'
+    ! Vorticity (a scalar in 2D)
+    allocate(PPMvorticity2D(ldl(1):ldu(1),ldl(2):ldu(2),1), stat = istat)
+    if(istat.ne.0) stop 'Field allocation error for vorticity (PPM)'
+    ! Scalar
+    allocate(PPMscalar2D(ldl(1):ldu(1),ldl(2):ldu(2),1), stat = istat)
+    if(istat.ne.0) stop 'Field allocation error for scalar (PPM)'
+
+  end subroutine initPPMFields2D
+
+end module PPMFields
diff --git a/HySoP/src/ppmInterface/Particles.f90 b/HySoP/src/ppmInterface/Particles.f90
new file mode 100755
index 000000000..1b7b12b5f
--- /dev/null
+++ b/HySoP/src/ppmInterface/Particles.f90
@@ -0,0 +1,1445 @@
+!> Functions dealing with particles :
+!> - initialisation, creation
+!> - remesh and interpolation
+!> - integration (push) 
+module Particles
+  
+  use ppm_module_rmsh, only : ppm_rmsh_create_part,ppm_interp_m2p, ppm_interp_p2m !, ppm_rmsh_remesh
+  use ppm_module_map_field_ghost, only:ppm_map_field_ghost_get!, ppm_map_field_ghost_put
+  use ppm_module_map_field, only : ppm_map_field_push,ppm_map_field_send,ppm_map_field_pop
+  use ppm_module_impose_part_bc
+  use client_data
+  use ppm_module_data, only : ppm_param_rmsh_kernel_mp4
+  use ppm_module_map_part
+  use ppm_module_util_dbg
+
+  implicit none
+
+  private
+
+  public initNSSolver_particles,getMemoryUsedForParticles,ScalarSolver_particles,init_parts,&
+       countAndCreateParticles, countAndUpdateParticles,PPMupdateParticles3D, PPMupdateParticles2D,&
+       freeParticles,createParticlesEverywhere,npart,RK4_2D,Ppmremesh2d,RK2_2D,PPMupdateParticles2DScalar,&
+       RK2_2DScalar,PPMremesh2DScalar,remesh2D,createParticlesEverywhereScalar,RK4_2Dscalar
+
+  !> cutoff values (threshold for vorticity values for which particles are created)
+  real(mk), dimension(2) :: cutoff
+  !> Current number of particles
+  integer :: npart
+  !> Particles positions
+  real(mk), dimension(:,:), pointer :: xp=>NULL()
+  !> Particles strength (i.e. the variable carried by the particles, here the vorticity)
+  real(mk), dimension(:,:), pointer :: omp=>NULL()
+  !> Particle velocities
+  real(mk), dimension(:,:), pointer :: velop=>NULL()
+  !> Particles RHS term
+  real(mk), dimension(:,:), pointer :: rhsp => NULL()
+  !> Particles scalar term
+  real(mk), dimension(:), pointer :: scalar_p => NULL()
+  !> Backup vector for RK schemes
+  real(mk), dimension(:,:), pointer :: buffer=>NULL(),buffer2=>NULL(),buffer3=>NULL()
+  
+  !> Size of buffer, i.e. roughly the number of particles at the previous time step
+  integer :: buffer_size
+  !> Kernel for remesh ...
+  integer :: kernel
+
+contains
+
+  subroutine ScalarSolver_particles(scalar,velocity,dt,topoid,meshid,ghostsize,resetpos,step,coordMin,resolution)
+    
+    !> Scalar field, on the grid
+    real(mk),dimension(:,:,:),pointer :: scalar
+    !> Velocity field, on the grid
+    real(mk),dimension(:,:,:,:),pointer :: velocity
+    !> Current time step
+    real(mk),intent(in) :: dt
+    !> Current topo id
+    integer, intent(in) :: topoid
+    !> Current mesh id
+    integer, intent(in) :: meshid
+    !> Number of ghost points
+    integer,  dimension(:),pointer:: ghostsize 
+    !> bool to reset (or not) particles positions
+    logical, intent(in) :: resetpos
+    real(mk),dimension(dim3),intent(in) :: coordMin,step
+    integer, dimension(dim3),intent(in) :: resolution
+    !> Direction of the current splitting step
+    integer :: dir
+    
+    do dir=1,dime
+       !call ppm_rmsh_create_part(topoid,meshid,xp,npart,scalar_p,scalar,cutoff,info,resetpos,&
+       !  field_wp=velocity,wp=velop,lda2=dime)
+       call reset_parts(scalar,resolution,coordMin,step)
+       write(*,'(a,i5,a,i8,a)') '[',rank,'] initialisation with ', npart,' particles'
+       ! Integrate
+       call push_split_particles(dir,dt,topoid,topoid,ghostsize,velocity,coordMin,step)
+       ! Remesh
+       call remesh_split_mp6(scalar,dir,step,coordMin)
+          
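+       ! Periodic wrap along the current direction: contributions remeshed into
+       ! (and near) the upper ghost layer are added to the matching cells at the
+       ! lower boundary, then the updated values are copied back to the upper side.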
+       if(dir == c_X) then
+          scalar(-ghostsize(c_X)+1:ghostsize(c_X)+1,:,:) = scalar(-ghostsize(c_X)+1:ghostsize(c_X)+1,:,:) &
+               + scalar(resolution(c_X)-ghostsize(c_X):resolution(c_X)+ghostsize(c_X),:,:)
+          scalar(resolution(c_X)-ghostsize(c_X):resolution(c_X)+ghostsize(c_X),:,:) = scalar(-ghostsize(c_X)&
+               +1:ghostsize(c_X)+1,:,:)
+       else if(dir == c_Y) then 
+          scalar(:,-ghostsize(c_Y)+1:ghostsize(c_Y)+1,:) = scalar(:,-ghostsize(c_Y)+1:ghostsize(c_Y)+1,:) &
+               + scalar(:,resolution(c_Y)-ghostsize(c_Y):resolution(c_Y)+ghostsize(c_Y),:)
+          scalar(:,resolution(c_Y)-ghostsize(c_Y):resolution(c_Y)+ghostsize(c_Y),:) = scalar(:,-ghostsize(c_Y)&
+               +1:ghostsize(c_Y)+1,:)
+       else
+          scalar(:,:,-ghostsize(c_Z)+1:ghostsize(c_Z)+1) = scalar(:,:,-ghostsize(c_Z)+1:ghostsize(c_Z)+1) &
+               + scalar(:,:,resolution(c_Z)-ghostsize(c_Z):resolution(c_Z)+ghostsize(c_Z))
+          scalar(:,:,resolution(c_Z)-ghostsize(c_Z):resolution(c_Z)+ghostsize(c_Z)) = scalar(:,:,-ghostsize(c_Z)&
+               +1:ghostsize(c_Z)+1)
+       end if
+    end do
+    ! Ghost values for vorticity
+!!$    call ppm_map_field_ghost_get(topoid,meshid,ghostsize,info)
+!!$    call ppm_map_field_push(topoid,meshid,scalar,info)
+!!$    call ppm_map_field_send(info)
+!!$    call ppm_map_field_pop(topoid,meshid,scalar,ghostsize,info)
+    
+  end subroutine ScalarSolver_particles
+
+  subroutine init_parts(scalar,velocity,resolution,coordMin,step)
+    real(mk),dimension(:,:,:), pointer :: scalar
+    real(mk),dimension(:,:,:,:),pointer :: velocity
+    real(mk),dimension(dim3),intent(in)::coordMin,step
+    integer, dimension(dim3),intent(in)::resolution
+
+    integer :: i,j,k,current_part
+    real(mk),dimension(dim3) :: coord
+    
+    current_part = 1
+    npart = product(resolution-1)
+    allocate(xp(dime,npart),velop(dime,npart),scalar_p(npart),buffer(dime,npart))
+    do k=1,resolution(c_Z)-1
+       coord(c_Z) = coordMin(c_Z) + (k-1)*step(c_Z)
+       do j=1,resolution(c_Y)-1
+          coord(c_Y) = coordMin(c_Y) + (j-1)*step(c_Y)
+          do i=1,resolution(c_X)-1
+             coord(c_X) = coordMin(c_X) + (i-1)*step(c_X)
+             xp(:,current_part) = coord(:)
+             velop(:,current_part) = velocity(:,i,j,k)
+             scalar_p(current_part) = scalar(i,j,k)
+             current_part = current_part + 1
+
+          end do
+       end do
+    end do
+
+    
+  endsubroutine init_parts
+
+  subroutine reset_parts(scalar,resolution,coordMin,step)
+    
+    real(mk),dimension(:,:,:), pointer :: scalar
+    real(mk),dimension(dim3),intent(in)::coordMin,step
+    integer, dimension(dim3),intent(in)::resolution
+
+    integer :: i,j,k,current_part
+    real(mk),dimension(dim3) :: coord
+    
+    current_part = 1
+    do k=1,resolution(c_Z)-1
+       coord(c_Z) = coordMin(c_Z) + (k-1)*step(c_Z)
+       do j=1,resolution(c_Y)-1
+          coord(c_Y) = coordMin(c_Y) + (j-1)*step(c_Y)
+          do i=1,resolution(c_X)-1
+             coord(c_X) = coordMin(c_X) + (i-1)*step(c_X)
+             xp(:,current_part) = coord(:)
+             scalar_p(current_part) = scalar(i,j,k)
+             current_part = current_part + 1
+          end do
+       end do
+    end do
+
+  endsubroutine reset_parts
+
+
+  !> Set required parameters for particles creations
+  subroutine initNSSolver_particles()
+    
+    ! Cutoff: lower and upper bounds; a particle is created wherever the value of the
+    ! reference field lies between cutoff(1) and cutoff(2).
+    cutoff(1) = 1.d-8
+    cutoff(2) = 1e9
+    kernel = ppm_param_rmsh_kernel_mp4
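+    ! (ppm_param_rmsh_kernel_mp4 selects the M'4 interpolation kernel for remeshing)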
+    npart = 0
+    buffer_size = npart
+  end subroutine initNSSolver_particles
+
+  
+  
+  !> PPM-based creation/update of particles distribution
+  subroutine PPMupdateParticles3D(field_on_grid,resetpos,topoid,meshid,vel)
+    !> The field used to create particles (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:,:), pointer :: field_on_grid
+    !>  true to reset distribution else false
+    logical, intent(in) :: resetpos
+    !> topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer :: info
+    !> velocity on grid (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:,:), pointer :: vel
+    info = 0
+
+    ! -------->  The following call will allocate memory for xp, omp and velop
+    ! -------->  And deallocation will be handled by ppm. (?)
+    ! -------->  Other variables carried by particles but not allocated during ppm_rmsh_create call
+    ! -------->  is there a better way to do this, with map_push or other ppm routines? No according to Omar. 
+    ! Call ppm func to create the particles distribution
+    call ppm_rmsh_create_part(topoid,meshid,xp,npart,omp,dime,field_on_grid,cutoff,info,resetpos,&
+         field_wp=vel,wp=velop,lda2=dime)
+    write(*,'(a,i5,a,i8,a)') '[',rank,'] initialisation with ', npart,' particles'
+
+    !if(associated(xp)) print *, "xp shape:", rank, " ", shape(xp), npart
+    ! -------->  Ok, from here all vars carried by particles must have the following shape : dime,npart
+    !> -------->  mesh to particles for velocity  => done in ppm_rmsh_create_part 
+    !    call ppm_interp_m2p(topoid, meshid, xp, npart, velop, dime, kernel, ghostsize, vel,info)
+    !    print *, 'End of update particles'
+
+  end subroutine PPMupdateParticles3D
+
+  subroutine PPMupdateParticles2DScalar(field_on_grid,resetpos,topoid,meshid,vel)
+    !> The field used to create particles (must be in PPM-style storage)
+    real(mk), dimension(:,:,:), pointer :: field_on_grid
+    !>  true to reset distribution else false
+    logical, intent(in) :: resetpos
+    !> topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer :: info,i
+    !> velocity on grid (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:), pointer :: vel
+    info = 0
+
+    ! -------->  The following call will allocate memory for xp, omp and velop
+    ! -------->  And deallocation will be handled by ppm. (?)
+    ! -------->  Other variables carried by particles but not allocated during ppm_rmsh_create call
+    ! -------->  is there a better way to do this, with map_push or other ppm routines? No according to Omar. 
+    ! Call ppm func to create the particles distribution
+    call ppm_rmsh_create_part(topoid,meshid,xp,npart,scalar_p,field_on_grid,cutoff,info,.true.,&
+         field_wp=vel,wp=velop,lda2=dime)
+    write(*,'(a,i5,a,i8,a)') '[',rank,'] initialisation with ', npart,' particles'
+ 
+    open(45,file="omp") ! Output only for one process
+    do i = 1,npart
+       write(45,'(3(11e14.5))') xp(c_X,i),scalar_p(i)
+    end do
+    close(45)
+
+
+   !if(associated(xp)) print *, "xp shape:", rank, " ", shape(xp), npart
+    ! -------->  Ok, from here all vars carried by particles must have the following shape : dime,npart
+    !> -------->  mesh to particles for velocity  => done in ppm_rmsh_create_part 
+    !    call ppm_interp_m2p(topoid, meshid, xp, npart, velop, dime, kernel, ghostsize, vel,info)
+    !    print *, 'End of update particles'
+
+  end subroutine PPMupdateParticles2DScalar
+
+  subroutine PPMupdateParticles2D(field_on_grid,resetpos,topoid,meshid,vel)
+    !> The field used to create particles (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:), pointer :: field_on_grid
+    !>  true to reset distribution else false
+    logical, intent(in) :: resetpos
+    !> topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer :: info
+    !> velocity on grid (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:), pointer :: vel
+    info = 0
+
+    ! -------->  The following call will allocate memory for xp, omp and velop
+    ! -------->  And deallocation will be handled by ppm. (?)
+    ! -------->  Other variables carried by particles but not allocated during ppm_rmsh_create call
+    ! -------->  is there a better way to do this, with map_push or other ppm routines? No according to Omar. 
+    ! Call ppm func to create the particles distribution
+    call ppm_rmsh_create_part(topoid,meshid,xp,npart,omp,dime,field_on_grid,cutoff,info,resetpos,&
+         field_wp=vel,wp=velop,lda2=dime)
+    write(*,'(a,i5,a,i8,a)') '[',rank,'] initialisation with ', npart,' particles'
+
+    !if(associated(xp)) print *, "xp shape:", rank, " ", shape(xp), npart
+    ! -------->  Ok, from here all vars carried by particles must have the following shape : dime,npart
+    !> -------->  mesh to particles for velocity  => done in ppm_rmsh_create_part 
+    !    call ppm_interp_m2p(topoid, meshid, xp, npart, velop, dime, kernel, ghostsize, vel,info)
+    !    print *, 'End of update particles'
+
+  end subroutine PPMupdateParticles2D
+
+  !> time integration
+  !> Advection of particles, more or less a copy of Adrien's code.
+  subroutine push_particles(dt,topoid,meshid,ghostsize,vel,rhs)
+    !> time step
+    real(mk), intent(in) ::dt
+    ! topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer,  dimension(:),pointer:: ghostsize
+    ! velocity on grid (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:,:), pointer :: vel
+     ! Secondary field to be mapped to particles (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:,:), pointer :: rhs
+    
+    ! Compute new particles positions ...
+    ! Integrate ... ===> update omp
+    ! Todo: switch between the different methods, selected by a user-defined variable? Later ...
+    ! call runge_kutta_2(dt,topoid,meshid,ghostsize,vel,rhs)
+    call RK4_3D(dt,topoid,meshid,ghostsize,vel,rhs)
+  end subroutine push_particles
+  
+  subroutine PPMremesh3D(topoid,meshid,ghostsize,field)
+    ! topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer,  dimension(:),pointer :: ghostsize
+    ! vorticity on grid, (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:,:), pointer :: field
+    
+    integer info
+    info = -1
+    !call ppm_dbg_print_d(topoid,ghostlayer,1,1,info,xp,npart)
+    call ppm_interp_p2m(topoid,meshid,xp,npart,omp,dime,kernel,ghostsize,field,info)
+    if(info.ne.0) then
+       stop 'Particles: ppm remesh error '
+    end if
+  end subroutine PPMremesh3D
+  
+  subroutine PPMremesh2D(topoid,meshid,ghostsize,field)
+    ! topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer,  dimension(:),pointer :: ghostsize
+    ! vorticity on grid, (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:), pointer :: field
+    
+    integer info
+    info = -1
+    !call ppm_dbg_print_d(topoid,ghostlayer,1,1,info,xp,npart)
+    call ppm_interp_p2m(topoid,meshid,xp,npart,omp,dime,kernel,ghostsize,field,info)
+    if(info.ne.0) then
+       stop 'Particles: ppm remesh error '
+    end if
+  end subroutine PPMremesh2D
+  subroutine PPMremesh2DScalar(topoid,meshid,ghostsize,field)
+    ! topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer,  dimension(:),pointer :: ghostsize
+    ! scalar on grid (must be in PPM-style storage)
+    real(mk), dimension(:,:,:), pointer :: field
+    
+    integer info
+    info = -1
+    !call ppm_dbg_print_d(topoid,ghostlayer,1,1,info,xp,npart)
+    call ppm_interp_p2m(topoid,meshid,xp,npart,scalar_p,kernel,ghostsize,field,info)
+    if(info.ne.0) then
+       stop 'Particles: ppm remesh error '
+    end if
+  end subroutine PPMremesh2DScalar
+  
+  !> Runge Kutta 2 for positions and 1 for vorticity
+  subroutine RK2_3D(dt,topoid,meshid,ghostsize,vel,rhs)
+    !> time step
+    real(mk), intent(in) ::dt
+    ! topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer,  dimension(:),pointer:: ghostsize
+    ! velocity on grid (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:,:), pointer :: vel
+     ! Secondary field to be mapped to particles (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:,:), pointer :: rhs
+    
+    !> local loop indices
+    integer :: i,newNpart,k
+    integer :: info
+    
+    ! buffer must be the same size as xp, so if the number of particles has increased
+    ! compared with the previous step we reallocate the secondary fields.
+    ! Note Franck : either we allocate buffer at each time step or we reallocate only when buffer_size increases.
+    ! Since memory is our main concern, we allocate/deallocate at each time step for the moment.
+    !    if(buffer_size.lt.npart) then
+    !      deallocate(buffer) 
+    allocate(buffer(dime,npart))
+    buffer_size = npart
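+    ! Midpoint (RK2) scheme: move the particles to x^n + dt/2*u(x^n), re-interpolate
+    ! the velocity and rhs at that position, then set x^{n+1} = x^n + dt*u(midpoint)
+    ! and advance the vorticity with a forward-Euler step using the midpoint rhs.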
+
+    do i=1,npart
+       do k=1,dime
+          buffer(k,i)=xp(k,i)
+          xp(k,i)=buffer(k,i)+0.5*dt*velop(k,i)
+       end do
+    end do
+    ! Particles positions have changed ... we must map between domains
+    !    call ppm_impose_part_bc(topoid,xp,npart,info) ! Useless according to doc, required according to Omar ...
+    newNpart = 0
+    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    !call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    ! Update sizes ...
+    npart = newNpart
+    !    if(size(rhsp,2).lt.npart) then
+    !      deallocate(rhsp) 
+    allocate(rhsp(dime,npart))
+    !  end if
+!!    print *, 'max min omp start', maxval(omp),minval(omp),maxval(rhsp),minval(rhsp)
+
+    !! Mesh to particles for the new particles positions ...
+    !> for velocity
+    call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+    !> for rhs of vorticity eq.
+    call ppm_interp_m2p(topoid,meshid,xp,npart,rhsp,dime,kernel,ghostsize,rhs,info)
+    ! Update positions according to the new velocity and vorticity with new rhs
+    
+    do i=1,npart
+       do k=1,dime
+          xp(k,i)=buffer(k,i)+dt*velop(k,i)
+          omp(k,i)=omp(k,i)+dt*rhsp(k,i)
+       end do
+    end do
+!!    print *, 'max min omp stop', maxval(omp),minval(omp), maxval(rhsp),minval(rhsp)
+
+    ! Free memory as soon as possible ...
+    deallocate(buffer,rhsp)
+    ! Vorticity mapping ...
+    newNpart = 0
+
+    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    !call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    npart = newNpart
+
+  end subroutine RK2_3D
+
+  !> Runge Kutta 4
+  subroutine RK4_3D(dt,topoid,meshid,ghostsize,vel,rhs)
+    !> time step
+    real(mk), intent(in) ::dt
+    ! topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer,  dimension(:),pointer::ghostsize
+    ! velocity on grid (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:,:), pointer :: vel
+     ! Secondary field to be mapped to particles (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:,:), pointer :: rhs
+    
+    !> local loop indices
+    integer :: i, newNpart,k
+    real(mk) :: alpha
+    !> error status
+    integer :: info
+    ! buffer, buffer2, buffer3 and rhsp hold per-particle intermediate values across the RK stages
+
+    allocate(buffer(dime,npart),buffer2(dime,npart),buffer3(dime,npart),rhsp(dime,npart))
+    buffer_size=npart
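+    ! Classical RK4 accumulation: buffer keeps x^n, buffer2 accumulates
+    ! k1 + 2*k2 + 2*k3 of the velocity (k4 is added in the last stage) and buffer3
+    ! does the same for the vorticity rhs; the final update is
+    ! x^{n+1} = x^n + dt/6*(k1 + 2*k2 + 2*k3 + k4).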
+    
+    !> Compute current rhs on particles
+    call ppm_interp_m2p(topoid,meshid,xp,npart,rhsp,dime,kernel,ghostsize,rhs,info)
+    ! First RK stage
+    ! Velocity is up to date, following the call to update_particles
+    alpha=0.5*dt
+    do i=1,npart
+       do k=1,dime
+          buffer(k,i)=xp(k,i)
+          xp(k,i)=buffer(k,i)+alpha*velop(k,i)
+          buffer2(k,i)=velop(k,i)
+          buffer3(k,i)=rhsp(k,i)
+       end do
+    end do
+    ! Particles positions have changed ... we must map between domains
+    !    call ppm_impose_part_bc(topoid,xp,npart,info) ! Useless according to doc, required according to Omar ...
+    newNpart = 0
+    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    !call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_push(buffer2,dime,npart,info)    
+    call ppm_map_part_push(buffer3,dime,npart,info)    
+    call ppm_map_part_push(rhsp,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(rhsp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer3,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer2,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    ! Update sizes ...
+    npart = newNpart
+ 
+    !! Mesh to particles for the new particles positions ...
+    !> for velocity and rhs
+    call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+    call ppm_interp_m2p(topoid,meshid,xp,npart,rhsp,dime,kernel,ghostsize,rhs,info)
+
+    !! Second RK4 stage, with the updated velocity
+    do i=1,npart
+       do k=1,dime
+          xp(k,i)=buffer(k,i)+alpha*velop(k,i)
+          buffer2(k,i)=buffer2(k,i)+2.*velop(k,i)
+          buffer3(k,i)=buffer3(k,i)+2.*rhsp(k,i)
+       end do
+    end do
+    newNpart = 0
+    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    !call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_push(buffer2,dime,npart,info)    
+    call ppm_map_part_push(buffer3,dime,npart,info)    
+    call ppm_map_part_push(rhsp,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(rhsp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer3,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer2,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    ! Update sizes ...
+    npart = newNpart
+
+    !! Mesh to particles for the new particles positions ...
+    !> for velocity and rhs
+    call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+    call ppm_interp_m2p(topoid,meshid,xp,npart,rhsp,dime,kernel,ghostsize,rhs,info)
+
+    !! Third RK4 stage
+    alpha=dt
+    do i=1,npart
+       do k=1,dime
+          xp(k,i)=buffer(k,i)+alpha*velop(k,i)
+          buffer2(k,i)=buffer2(k,i)+2.*velop(k,i)
+          buffer3(k,i)=buffer3(k,i)+2.*rhsp(k,i)
+       end do
+    end do
+    newNpart = 0
+    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    !call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_push(buffer2,dime,npart,info)    
+    call ppm_map_part_push(buffer3,dime,npart,info)    
+    call ppm_map_part_push(rhsp,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(rhsp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer3,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer2,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    ! Update sizes ...
+    npart = newNpart
+
+    !! Mesh to particles for the new particles positions ...
+    !> for velocity and rhs
+    call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+    call ppm_interp_m2p(topoid,meshid,xp,npart,rhsp,dime,kernel,ghostsize,rhs,info)
+    
+    !! Last RK4 stage
+    alpha=dt/6.
+    do i=1,npart
+       do k=1,dime
+          xp(k,i)=buffer(k,i)+alpha*buffer2(k,i)+alpha*velop(k,i)
+          omp(k,i)=omp(k,i)+alpha*buffer3(k,i)+alpha*rhsp(k,i)
+       end do
+    end do
+    
+    ! Free memory as soon as possible ...
+    deallocate(buffer,buffer2,buffer3,rhsp)
+    ! Vorticity mapping ...
+    newNpart = 0
+    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    !call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    npart = newNpart
+    
+  end subroutine RK4_3D
+
+  !> Runge Kutta 4, for a 2D domain (i.e. rhs == 0).
+  subroutine RK4_2D(dt,topoid,meshid,ghostsize,vel)
+    !> time step
+    real(mk), intent(in) ::dt
+    ! topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer,  dimension(:),pointer::ghostsize
+    ! velocity on grid (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:), pointer :: vel
+    
+    !> local loop indices
+    integer :: i, newNpart,k
+    real(mk) :: alpha
+    !> error status
+    integer :: info
+    ! buffer saves the initial positions and buffer2 accumulates the velocity stages
+
+    allocate(buffer(dime,npart),buffer2(dime,npart))
+    buffer_size=npart
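+    ! Same classical RK4 accumulation as in RK4_3D, but in 2D the vorticity rhs is
+    ! zero so only the particle positions are advanced.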
+   
+    ! First RK stage
+    ! Velocity is up to date, following the call to update_particles
+    alpha=0.5*dt
+    do i=1,npart
+       do k=1,dime
+          buffer(k,i)=xp(k,i)
+          xp(k,i)=buffer(k,i)+alpha*velop(k,i)
+          buffer2(k,i)=velop(k,i)
+       end do
+    end do
+    ! Particles positions have changed ... we must map between domains
+    call ppm_impose_part_bc(topoid,xp,npart,info) ! Useless according to doc, required according to Omar ...
+    newNpart = 0
+    !call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_push(buffer2,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(buffer2,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    ! Update sizes ...
+    print *, "NEW", npart,newNpart
+    npart = newNpart
+
+ 
+    !! Mesh to particles for the new particles positions ...
+    !> for velocity and rhs
+    call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+
+    !! Second RK4 stage, with the updated velocity
+    do i=1,npart
+       do k=1,dime
+          xp(k,i)=buffer(k,i)+alpha*velop(k,i)
+          buffer2(k,i)=buffer2(k,i)+2.*velop(k,i)
+       end do
+    end do
+    call ppm_impose_part_bc(topoid,xp,npart,info) 
+    newNpart = 0
+    !call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_push(buffer2,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(buffer2,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    print *, "NEW2", npart,newNpart
+    ! Update sizes ...
+    npart = newNpart
+
+    !! Mesh to particles for the new particles positions ...
+    !> for velocity and rhs
+    call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+
+    !! Third RK4 stage
+    alpha=dt
+    do i=1,npart
+       do k=1,dime
+          xp(k,i)=buffer(k,i)+alpha*velop(k,i)
+          buffer2(k,i)=buffer2(k,i)+2.*velop(k,i)
+       end do
+    end do
+    newNpart = 0
+    call ppm_impose_part_bc(topoid,xp,npart,info) 
+    !call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_push(buffer2,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(buffer2,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    print *, "NEW3", npart,newNpart
+    ! Update sizes ...
+    npart = newNpart
+
+    !! Mesh to particles for the new particles positions ...
+    !> for velocity and rhs
+    call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+
+    !! Last RK4 stage
+    alpha=dt/6.
+    do i=1,npart
+       do k=1,dime
+          xp(k,i)=buffer(k,i)+alpha*buffer2(k,i)+alpha*velop(k,i)
+       end do
+    end do
+    
+    ! Free memory as soon as possible ...
+    deallocate(buffer,buffer2)
+    ! Vorticity mapping ...
+    call ppm_impose_part_bc(topoid,xp,npart,info) 
+    newNpart = 0
+    !call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    print *, "NEW4", npart,newNpart
+    npart = newNpart
+    
+  end subroutine RK4_2D
+  
+  !> Runge Kutta 4, for a 2D domain (i.e. rhs == 0).
+  subroutine RK4_2DScalar(dt,topoid,meshid,ghostsize,vel)
+    !> time step
+    real(mk), intent(in) ::dt
+    ! topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer,  dimension(:),pointer::ghostsize
+    ! velocity on grid (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:), pointer :: vel
+    
+    !> local loop indices
+    integer :: i, newNpart,k
+    real(mk) :: alpha
+    !> error status
+    integer :: info
+    ! buffer saves the initial positions and buffer2 accumulates the velocity stages
+
+    allocate(buffer(dime,npart),buffer2(dime,npart))
+    buffer_size=npart
+   
+    ! First RK stage
+    ! Velocity is up to date, following the call to update_particles
+    alpha=0.5*dt
+    do i=1,npart
+       do k=1,dime
+          buffer(k,i)=xp(k,i)
+          xp(k,i)=buffer(k,i)+alpha*velop(k,i)
+          buffer2(k,i)=velop(k,i)
+       end do
+    end do
+    ! Particles positions have changed ... we must map between domains
+    !call ppm_impose_part_bc(topoid,xp,npart,info) ! Useless according to doc, required according to Omar ...
+    newNpart = 0
+    !call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(scalar_p,npart,info)    ! scalar
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_push(buffer2,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(buffer2,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(scalar_p,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    ! Update sizes ...
+    npart = newNpart
+
+    !! Mesh to particles for the new particles positions ...
+    !> for velocity and rhs
+    call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+    !! Second RK4 stage, with the updated velocity
+    do i=1,npart
+       do k=1,dime
+          xp(k,i)=buffer(k,i)+alpha*velop(k,i)
+          buffer2(k,i)=buffer2(k,i)+2.*velop(k,i)
+       end do
+    end do
+!    call ppm_impose_part_bc(topoid,xp,npart,info) 
+    newNpart = 0
+    !call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(scalar_p,npart,info)    ! scalar
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_push(buffer2,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(buffer2,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(scalar_p,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    ! Update sizes ...
+    npart = newNpart
+
+    !! Mesh to particles for the new particles positions ...
+    !> for velocity and rhs
+    call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+
+    !! Third RK4 stage
+    alpha=dt
+    do i=1,npart
+       do k=1,dime
+          xp(k,i)=buffer(k,i)+alpha*velop(k,i)
+          buffer2(k,i)=buffer2(k,i)+2.*velop(k,i)
+       end do
+    end do
+    newNpart = 0
+    !call ppm_impose_part_bc(topoid,xp,npart,info) 
+    !call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(scalar_p,npart,info)    ! scalar
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_push(buffer2,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(buffer2,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(scalar_p,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    ! Update sizes ...
+    npart = newNpart
+
+    !! Mesh to particles for the new particles positions ...
+    !> for velocity and rhs
+    call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+
+    !! Last RK4 stage
+    alpha=dt/6.
+    do i=1,npart
+       do k=1,dime
+          xp(k,i)=buffer(k,i)+alpha*buffer2(k,i)+alpha*velop(k,i)
+       end do
+    end do
+    
+    ! Scalar mapping ...
+    !call ppm_impose_part_bc(topoid,xp,npart,info) 
+    newNpart = 0
+    !call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(scalar_p,npart,info)    ! scalar
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(scalar_p,npart,newNpart,info)
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    npart = newNpart
+    ! Free memory as soon as possible ...
+    deallocate(buffer,buffer2)
+
+  end subroutine RK4_2DSCALAR
+
+  !> Runge Kutta 2, for a 2D domain (i.e. rhs == 0).
+  subroutine RK2_2D(dt,topoid,meshid,ghostsize,vel)
+    !> time step
+    real(mk), intent(in) ::dt
+    ! topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer,  dimension(:),pointer::ghostsize
+    ! velocity on grid (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:), pointer :: vel
+    
+    !> local loop indices
+    integer :: i, newNpart,k
+    real(mk) :: alpha
+    !> error status
+    integer :: info
+    ! buffer saves the initial positions xp, thus buffer_size = npart
+
+    allocate(buffer(dime,npart))
+    buffer_size=npart
+   
+    ! First RK stage
+    ! Velocity is up to date, following the call to update_particles
+    alpha=0.5*dt
+    do i=1,npart
+       do k=1,dime
+          buffer(k,i)=xp(k,i)
+          xp(k,i)=buffer(k,i)+alpha*velop(k,i)
+       end do
+    end do
+    ! Particles positions have changed ... we must map between domains
+  !  call ppm_impose_part_bc(topoid,xp,npart,info) ! Useless according to doc, required according to Omar ...
+    newNpart = 0
+    !call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    ! Update sizes ...
+    npart = newNpart
+
+    !! Mesh to particles for the new particles positions ...
+    !> for velocity and rhs
+    call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+
+    !! Second RK2 stage, with the updated velocity
+    do i=1,npart
+       do k=1,dime
+          xp(k,i)=buffer(k,i)+dt*velop(k,i)
+       end do
+    end do
+    ! Vorticity mapping ...
+    !    call ppm_impose_part_bc(topoid,xp,npart,info) 
+    newNpart = 0
+    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    !call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    npart = newNpart
+    ! Free memory as soon as possible ...
+    deallocate(buffer)
+
+  end subroutine RK2_2D
+  
+  !> Runge Kutta 2, for a 2D domain (i.e. rhs == 0).
+  subroutine RK2_2DScalar(dt,topoid,meshid,ghostsize,vel)
+    !> time step
+    real(mk), intent(in) ::dt
+    ! topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer,  dimension(:),pointer::ghostsize
+    ! velocity on grid (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:), pointer :: vel
+    
+    !> local loop indices
+    integer :: i, newNpart,k
+    real(mk) :: alpha
+    !> error status
+    integer :: info,np0
+    ! buffer saves the initial positions xp, thus buffer_size = npart
+
+    allocate(buffer(dime,npart))
+    buffer_size=npart
+    
+    np0 = npart
+
+    ! First RK stage
+    ! Velocity is up to date, following the call to update_particles
+    alpha=0.5*dt
+    do i=1,npart
+       do k=1,dime
+          buffer(k,i)=xp(k,i)
+          xp(k,i)=buffer(k,i)+alpha*velop(k,i)
+       end do
+    end do
+    ! Particles positions have changed ... we must map between domains
+    !call ppm_impose_part_bc(topoid,xp,npart,info) ! Useless according to doc, required according to Omar ...
+    newNpart = 0
+    call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    !call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(scalar_p,npart,info)    ! scalar
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(scalar_p,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    ! Update sizes ...
+    npart = newNpart
+    !! Mesh to particles interpolation for the new particle positions ...
+    !> for velocity
+    call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+    !! Second RK2 stage, with the updated velocity
+    do i=1,npart
+       do k=1,dime
+          xp(k,i)=buffer(k,i)+dt*velop(k,i)
+       end do
+    end do
+ !   call ppm_impose_part_bc(topoid,xp,npart,info) 
+    newNpart = 0
+    !    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(scalar_p,npart,info)    ! scalar
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(scalar_p,npart,newNpart,info)
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    npart = newNpart
+  end subroutine RK2_2DScalar
+
+  !> Runge Kutta 2 for positions (one splitting direction at a time); the scalar scalar_p is carried along.
+  subroutine push_split_particles(dir,dt,topoid,meshid,ghostsize,vel,coordMin,step)
+    !> splitting direction
+    integer, intent(in) :: dir
+    !> time step
+    real(mk), intent(in) ::dt
+    ! topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer,  dimension(:),pointer:: ghostsize
+    ! velocity on grid
+    real(mk), dimension(:,:,:,:), pointer :: vel
+    real(mk),dimension(dim3),intent(in) :: coordMin,step
+    !> local loop indices
+    integer :: i,newNpart
+    integer :: info
+
+    do i=1,npart
+       buffer(dir,i)=xp(dir,i)
+       xp(dir,i)=buffer(dir,i)+0.5*dt*velop(dir,i)
+    end do
+    ! Particles positions have changed ... we must map between domains
+    !    call ppm_impose_part_bc(topoid,xp,npart,info) ! Useless according to doc, required according to Omar ...
+    newNpart = 0
+    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    !call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(scalar_p,npart,info)    ! scalar
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(scalar_p,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    ! Update sizes ...
+    npart = newNpart
+    !    if(size(rhsp,2).lt.npart) then
+
+    !> for velocity
+    !call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+    call mesh2particles(vel,coordMin,step,dir)
+    
+    do i=1,npart
+       xp(dir,i)=buffer(dir,i)+dt*velop(dir,i)
+    end do
+
+    ! Scalar mapping ...
+    newNpart = 0
+
+    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    !call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(scalar_p,npart,info)    ! scalar
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(scalar_p,npart,newNpart,info)
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    npart = newNpart
+  end subroutine push_split_particles
+  
+  subroutine mesh2particles(velocity,coordMin,step,dir)
+
+    real(mk),dimension(:,:,:,:),pointer :: velocity
+    integer, intent(in) :: dir
+    real(mk),dimension(dim3),intent(in) :: step
+    real(mk),dimension(dim3),intent(in) :: coordMin
+
+    real(mk) :: coord,dist
+    real(mk),dimension(2) :: weights
+    integer :: i,j
+    integer, dimension(2,dime) :: indGrid
+
+    do i = 1,npart
+       do j = 1,2
+          indGrid(j,:) = ((xp(:,i) - coordMin(:))/step(:) + epsilon(pi))+1
+       enddo
+       indGrid(2,dir) = indGrid(1,dir) + 1
+     
+       coord = coordMin(dir) + (indGrid(1,dir)-1)*step(dir)
+       dist = (xp(dir,i) - coord)/step(dir)
+       weights(1) = 1.-dist
+       weights(2) = dist
+       
+       velop(dir,i) =  weights(1)*velocity(dir,indGrid(1,1),indGrid(1,2),indGrid(1,3))&
+            + weights(2)*velocity(dir,indGrid(2,1),indGrid(2,2),indGrid(2,3))
+    end do
+
+  end subroutine mesh2particles
+
+  subroutine remesh_split_mp6(scalar,dir,step,coordMin)
+    real(mk),dimension(:,:,:), pointer :: scalar
+    integer, intent(in) :: dir
+    real(mk),dimension(dim3),intent(in) :: step
+    real(mk),dimension(dim3),intent(in) :: coordMin
+
+    real(mk) :: coord,dist
+    real(mk),dimension(6) :: val,weights
+    integer :: i,j
+
+    integer, dimension(6,dime) :: indGrid
+
+    scalar = 0.0
+
+    do i = 1,npart
+       do j = 1,6
+          indGrid(j,:) = ((xp(:,i) - coordMin(:))/step(:) + epsilon(pi))+1
+       enddo
+       indGrid(1,dir) = indGrid(3,dir) - 2
+       indGrid(2,dir) = indGrid(3,dir) - 1
+       indGrid(4,dir) = indGrid(3,dir) + 1
+       indGrid(5,dir) = indGrid(3,dir) + 2
+       indGrid(6,dir) = indGrid(3,dir) + 3
+
+       coord = coordMin(dir) + (indGrid(3,dir)-1)*step(dir)
+       dist = (xp(dir,i) - coord)/step(dir)
+       weights(1) = -(dist)*(5.*(dist + 2.)-8.)*(dist - 1.)**3/24.
+       weights(2) = (dist)*(dist - 1.)*(25.*(dist + 1.)**3-114.*(dist + 1.)**2+153.*(dist + 1.)-48.)/24.
+       weights(3) = -(dist-1.)*(25.*dist**4-38.*dist**3-3.*dist**2+12.*dist+12)/12.
+       weights(4) = (dist)*(25.*(1. - dist)**4-38.*(1. - dist)**3-3.*(1. - dist)**2+12.*(1. - dist)+12)/12.
+       weights(5) = (1. - dist)*(-dist)*(25.*(2. - dist)**3-114.*(2. - dist)**2+153.*(2. - dist)-48.)/24.
+       weights(6) = -(1. - dist)*(5.*(3. - dist)-8.)*(-dist)**3/24.
+
+       val(:) = scalar_p(i)*weights(:)
+       do j = 1, 6
+          scalar(indGrid(j,c_X),indGrid(j,c_Y),indGrid(j,c_Z)) = &
+               scalar(indGrid(j,c_X),indGrid(j,c_Y),indGrid(j,c_Z)) + val(j)
+       end do
+
+    end do
+ 
+  end subroutine remesh_split_mp6
+  
+
+  !> Return the memory (in MB) used to store the variables attached to particles
+  function getMemoryUsedForParticles()
+    real(mk) :: getMemoryUsedForParticles
+    
+    getMemoryUsedForParticles = sizeof(xp)+sizeof(velop)+sizeof(rhsp)+sizeof(buffer)+&
+         sizeof(buffer2)+sizeof(buffer3)
+    getMemoryUsedForParticles = getMemoryUsedForParticles*1e-6
+    if(verbose) then
+       write(*,'(a,i3,a,f10.4,a)') &
+            '[',rank,'] memory used for particles:', getMemoryUsedForParticles, ' MB.'
+    end if
+
+  end function getMemoryUsedForParticles
+
+  !> Create particles on the grid points where cutoff(1) < |refField| < cutoff(2).
+  !! (An illustrative usage sketch is given after this subroutine.)
+  subroutine countAndCreateParticles(refField,velocity,resolution,step,coordMin)
+    
+    !> Field (on grid) used to set particles
+    real(mk), dimension(:,:,:,:), pointer :: refField
+    !> velocity on the grid
+    real(mk), dimension(:,:,:,:), pointer :: velocity
+    !> Local resolution
+    integer, dimension(dim3),intent(in) :: resolution
+    !> Space step
+    real(mk), dimension(dim3),intent(in) :: step
+    !> Coordinates of the lowest point of the current domain
+    real(mk), dimension(dim3),intent(in) :: coordMin
+    
+    ! particles counter
+    integer :: count
+    ! norm of refField at each point of the grid
+    real(mk) :: strength
+    ! coordinates
+    real(mk), dimension(dim3) :: coord
+    integer, dimension(dim3) :: nbCells
+    
+    integer :: i,j,k
+    
+    nbCells = max(resolution-1,1)
+
+    print *, "uuuu",maxval(refField)
+    
+    ! Count the number of particles within cutoff bounds.
+    ! Loop over grid points. We use 'max' to allow the '1 point in one direction' case 
+    ! (i.e. to deal with 2D case in a 3D context)
+    ! Remark : there is no resize function in fortran so we must count first, allocate and then fill fields. 
+    count = 0
+
+    print *, "str", cutoff
+   do k=1,nbCells(c_Z)
+       do j=1,nbCells(c_Y)
+          do i=1,nbCells(c_X)
+             strength = sqrt(sum(refField(:,i,j,k)**2))
+             if((strength.gt.cutoff(1)).and.(strength.lt.cutoff(2))) then
+                count = count + 1
+             end if
+          end do
+       end do
+    end do
+    
+    ! Allocations
+    allocate(xp(dime,count),omp(dime,count),velop(dime,count))
+    
+    print *, 'nb parts ...', count, shape(xp)
+
+    ! and set values
+    coord = coordMin
+    count = 0
+    do k=1,nbCells(c_Z)
+       do j=1,nbCells(c_Y)
+          do i=1,nbCells(c_X)
+             strength = sqrt(sum(refField(:,i,j,k)**2))
+             if((strength.gt.cutoff(1)).and.(strength.lt.cutoff(2))) then
+                count = count + 1
+                omp(:,count) = refField(:,i,j,k)
+                velop(:,count) = velocity(:,i,j,k)
+                xp(:,count) = coord(1:dime)
+             end if
+             coord(c_X) = coord(c_X) + step(c_X)
+          end do
+          coord(c_X) = coordMin(c_X)
+          coord(c_Y) = coord(c_Y) + step(c_Y)
+       end do
+       coord(c_Y) = coordMin(c_Y)
+       coord(c_Z) = coord(c_Z) + step(c_Z)
+    end do
+    
+    npart = count
+    write(*,'(a,i5,a,i8,a)') '[',rank,'] initialisation with ', npart,' particles'
+
+  end subroutine countAndCreateParticles
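+  ! Illustrative usage sketch (hypothetical field names; 'vorticity' and 'velocity'
+  ! are grid fields stored component-first, i.e. shaped (dime,nx,ny,nz), and the
+  ! module variable 'cutoff' must have been set beforehand):
+  !    cutoff = (/ 1e-6_mk, 1e10_mk /)
+  !    call countAndCreateParticles(vorticity,velocity,resolution,step,coordMin)
+  !    ! -> allocates xp, omp and velop and sets npart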
+
+  !> Update particles on the grid points where refField>cutoff.
+  !! Note : deallocation/reallocation occurs if the number of particles has increased.
+  subroutine countAndUpdateParticles(refField,velocity,resolution,step,coordMin)
+    
+    !> Field (on grid) used to set particles
+    real(mk), dimension(:,:,:,:), pointer :: refField
+    !> velocity on the grid
+    real(mk), dimension(:,:,:,:), pointer :: velocity
+    !> Local resolution
+    integer, dimension(dim3),intent(in) :: resolution
+    !> Space step
+    real(mk), dimension(dim3),intent(in) :: step
+    !> Coordinates of the lowest point of the current domain
+    real(mk), dimension(dim3),intent(in) :: coordMin
+    
+    ! particles counter
+    integer :: count
+    ! norm of refField at each point of the grid
+    real(mk) :: strength
+    ! coordinates
+    real(mk), dimension(dim3) :: coord
+    integer, dimension(dim3) :: nbCells
+    integer :: i,j,k
+       
+    nbCells = max(resolution-1,1)
+    
+    ! Count the number of particles within cutoff bounds.
+    ! Loop over grid points. We use 'max' to allow the '1 point in one direction' case 
+    ! (i.e. to deal with 2D case in a 3D context)
+    ! Remark : there is no resize function in fortran so we must count first, allocate and then fill fields. 
+    count = 0
+    do k=1,max(resolution(c_Z)-1,1)
+       do j=1,max(resolution(c_Y)-1,1)
+          do i=1,max(resolution(c_X)-1,1)
+             strength = sqrt(sum(refField(:,i,j,k)**2))
+             if((strength.gt.cutoff(1)).and.(strength.lt.cutoff(2))) then
+                count = count + 1
+             end if
+          end do
+       end do
+    end do
+    
+    ! Check if reallocation is required
+    if(count > npart) then
+       ! Free old memory
+       if(associated(xp)) deallocate(xp)
+       if(associated(omp)) deallocate(omp)
+       if(associated(velop)) deallocate(velop)
+    
+       ! Allocations
+       allocate(xp(dime,count),omp(dime,count),velop(dime,count))
+    end if
+
+    ! and set values
+    coord = coordMin
+    count = 0
+    do k=1,max(resolution(c_Z)-1,1)
+       do j=1,max(resolution(c_Y)-1,1)
+          do i=1,max(resolution(c_X)-1,1)
+             strength = sqrt(sum(refField(:,i,j,k)**2))
+             if((strength.gt.cutoff(1)).and.(strength.lt.cutoff(2))) then
+                count = count + 1
+                omp(:,count) = refField(:,i,j,k)
+                velop(:,count) = velocity(:,i,j,k)
+                xp(:,count) = coord
+             end if
+             coord(c_X) = coord(c_X) + step(c_X)
+          end do
+          coord(c_X) = coordMin(c_X)
+          coord(c_Y) = coord(c_Y) + step(c_Y)
+       end do
+       coord(c_Y) = coordMin(c_Y)
+       coord(c_Z) = coord(c_Z) + step(c_Z)
+    end do
+    
+    npart = count
+    write(*,'(a,i5,a,i8,a)') '[',rank,'] update with ', npart,' particles'
+  end subroutine countAndUpdateParticles
+
+  !> Create particles on every grid point of the local domain.
+  subroutine createParticlesEverywhere(refField,velocity,resolution,step,coordMin)
+    
+    !> Field (on grid) used to set particles
+    real(mk), dimension(:,:,:,:), pointer :: refField
+    !> velocity on the grid
+    real(mk), dimension(:,:,:,:), pointer :: velocity
+    !> Local resolution
+    integer, dimension(dim3),intent(in) :: resolution
+    !> Space step
+    real(mk), dimension(dim3),intent(in) :: step
+    !> Coordinates of the lowest point of the current domain
+    real(mk), dimension(dim3),intent(in) :: coordMin
+    
+    ! coordinates
+    real(mk), dimension(dim3) :: coord
+    integer, dimension(dim3) :: nbCells
+    integer :: i,j,k,count
+    
+    nbCells = max(resolution-1,1)
+    coord = coordMin
+    count = 0
+    npart = product(nbCells)
+    ! Allocations
+    allocate(xp(dime,npart),omp(dime,npart),velop(dime,npart))
+    do k=1,nbCells(c_Z)
+       do j=1,nbCells(c_Y)
+          do i=1,nbCells(c_X)
+             count = count + 1
+             omp(:,count) = refField(:,i,j,k)
+             velop(:,count) = velocity(:,i,j,k)
+             xp(:,count) = coord
+             coord(c_X) = coord(c_X) + step(c_X)
+          end do
+          coord(c_X) = coordMin(c_X)
+          coord(c_Y) = coord(c_Y) + step(c_Y)
+       end do
+       coord(c_Y) = coordMin(c_Y)
+       coord(c_Z) = coord(c_Z) + step(c_Z)
+    end do
+    
+    write(*,'(a,i5,a,i8,a)') '[',rank,'] initialisation with ', npart,' particles'
+
+  end subroutine createParticlesEverywhere
+  
+  !> Create particles on every grid point of the local domain, carrying a scalar.
+  subroutine createParticlesEverywhereScalar(refField,velocity,resolution,step,coordMin)
+    
+    !> Field (on grid) used to set particles
+    real(mk), dimension(:,:,:), pointer :: refField
+    !> velocity on the grid
+    real(mk), dimension(:,:,:,:), pointer :: velocity
+    !> Local resolution
+    integer, dimension(dim3),intent(in) :: resolution
+    !> Space step
+    real(mk), dimension(dim3),intent(in) :: step
+    !> Coordinates of the lowest point of the current domain
+    real(mk), dimension(dim3),intent(in) :: coordMin
+    
+    ! coordinates
+    real(mk), dimension(dim3) :: coord
+    integer, dimension(dim3) :: nbCells
+    integer :: i,j,k,count
+    
+    nbCells = max(resolution-1,1)
+    coord = coordMin
+    count = 0
+    npart = product(nbCells)
+    ! Allocations
+    allocate(xp(dime,npart),scalar_p(npart),velop(dime,npart))
+    do k=1,nbCells(c_Z)
+       do j=1,nbCells(c_Y)
+          do i=1,nbCells(c_X)
+             count = count + 1
+             scalar_p(count) = refField(i,j,k)
+             velop(:,count) = velocity(:,i,j,k)
+             xp(:,count) = coord
+             coord(c_X) = coord(c_X) + step(c_X)
+          end do
+          coord(c_X) = coordMin(c_X)
+          coord(c_Y) = coord(c_Y) + step(c_Y)
+       end do
+       coord(c_Y) = coordMin(c_Y)
+       coord(c_Z) = coord(c_Z) + step(c_Z)
+    end do
+    
+    write(*,'(a,i5,a,i8,a)') '[',rank,'] initialisation with ', npart,' particles'
+    
+    open(45,file="scalp") ! Output only for one process
+    do i = 1,npart
+       write(45,'(6e14.5)') xp(c_X,i),scalar_p(i)
+    end do
+    close(45)
+  end subroutine createParticlesEverywhereScalar
+
+
+  !> Free all arrays carried by particles
+  !! useful only for tests.
+  ! --------> is it required to call some specific routine to clean anything related to particles at the end of the simulation,
+  ! or does ppm_finalize do it all?
+  ! According to Omar: do not use ppm "internal" routines but clean it yourself. 
+  subroutine freeParticles()
+    if(associated(xp)) deallocate(xp)
+    if(associated(omp)) deallocate(omp)
+    if(associated(velop)) deallocate(velop)
+    if(associated(rhsp)) deallocate(rhsp)
+    if(associated(buffer)) deallocate(buffer)
+    if(associated(buffer2)) deallocate(buffer2)
+    if(associated(buffer3)) deallocate(buffer3)
+    if(associated(scalar_p)) deallocate(scalar_p)
+    npart = 0
+    buffer_size = 0
+  end subroutine freeParticles
+
+  subroutine remesh2D(field,coordMin,step,resolution)
+    
+    real(mk),dimension(:,:,:),pointer :: field
+    real(mk),dimension(dim3),intent(in) :: coordMin
+    real(mk),dimension(dim3),intent(in) :: step
+    integer, dimension(dim3),intent(in) :: resolution
+
+    real(mk),dimension(dime) :: invStep
+    !! List of grid point indices for each particle : [Left-1 Left Right Right+1]
+    !! Left/Right == left/right points of the grid around the particle
+    integer, dimension(4,dime) :: indGrid
+    !! current point coordinates and distance to the left boundary of the local domain
+    real(mk),dimension(dime) :: coord,dist
+    !! weights
+    real(mk),dimension(4,dime) :: weights
+    integer:: i,j
+    invStep = 1./step(1:dime)
+    do i = 1,npart
+       indGrid(2,:) = ((xp(:,i) - coordMin(:))*invStep(:) + epsilon(pi))+1
+       indGrid(1,:) = indGrid(2,:) - 1
+       indGrid(3,:) = indGrid(2,:) + 1
+       indGrid(4,:) = indGrid(2,:) + 2
+       !enforce periodicity
+       do j =1,4
+          indGrid(j,:) = mod(indGrid(j,:)+resolution(1:dime),resolution(1:dime))+1
+       end do
+
+       coord = coordMin(1:dime) + (indGrid(2,:)-1)*step(1:dime)
+       dist = (xp(:,i) - coord)*invStep
+       
+       weights(1,:) = 0.5*dist*((1.-dist)**2)
+       weights(2,:) = 1.-2.5*dist**2+1.5*dist**3
+       weights(3,:) = 1.-2.5*(1.-dist)**2+1.5*(1.-dist)**3
+       weights(4,:) = 0.5*dist**2*(dist-1.)
+       
+       do j =1,4
+          field(indGrid(j,c_X),indGrid(:,c_Y),1) = field(indGrid(j,c_X),indGrid(:,c_Y),1)&
+               + weights(j,c_X)*weights(:,c_Y)*scalar_p(i)
+       end do
+
+    end do
+    
+  end subroutine remesh2D
+
+
+
+end module Particles
diff --git a/HySoP/src/scalesInterface/CMakeLists.txt b/HySoP/src/scalesInterface/CMakeLists.txt
new file mode 100644
index 000000000..e92c83e31
--- /dev/null
+++ b/HySoP/src/scalesInterface/CMakeLists.txt
@@ -0,0 +1,84 @@
+#=======================================================
+# parmesscales library compilation/link process
+#
+# F. Pérignon, june 2012
+#
+#=======================================================
+
+# cmake project name
+set(SCALES_NAME parmesscales)
+# --- Name for the package ---
+# This name will be used to install parmeslegi (library, headers, ...) and by any other lib or software that needs to search for parmeslegi.
+set(SCALESPACKAGE_NAME "parmeslegi")
+# --- Set a version number for the package ---
+set(${SCALESPACKAGE_NAME}_version 1.0.0)
+# --- The name (without extension) of the lib to be created ---
+set(SCALES_LIBRARY_NAME ${SCALES_NAME})
+
+# Any file in these dirs will be used to build the parmesscales library
+set(${SCALES_LIBRARY_NAME}_SRCDIRS 
+  src
+  src/Layout
+  src/particles
+  src/output
+  )
+
+# Matching expr for files to be compiled. 
+set(EXTS *.f90)
+# Matching expr for headers (install purpose)
+set(EXTS_HDRS *.hpp *.h)
+
+set(${SCALESEXE_NAME}_SRCDIRS test/ test/src/ test/src/Test_advec
+  test/src/Test_io test/src/Test_topo)
+  
+set(${SCALES_NAME}_LINK_FLAGS ${${SCALES_NAME}_LINK_FLAGS} ${MPI_Fortran_LINK_FLAGS})
+
+set(SCALESLIBS ${SCALESLIBS} ${MPI_Fortran_LIBRARIES} )
+
+# ============= Source and header files list =============
+# We scan all files with matching extension in directories 
+# containing sources.
+# Source files list:
+foreach(_DIR ${${SCALES_LIBRARY_NAME}_SRCDIRS})
+  set(_DIR_FILES)
+  foreach(_EXT ${EXTS}) # Source files
+    file(GLOB _DIR_FILES_EXT ${_DIR}/${_EXT})
+    if(_DIR_FILES_EXT)
+      list(APPEND ${SCALES_LIBRARY_NAME}_SRC ${_DIR_FILES_EXT})
+    endif()
+  endforeach()
+endforeach()
+
+# We add headers to source files
+list(APPEND ${SCALES_LIBRARY_NAME}_SRC ${${SCALES_LIBRARY_NAME}_HDRS})
+
+# Add directories to those searched by compiler ...
+# -I
+include_directories(${${SCALES_LIBRARY_NAME}_SRCDIRS})
+
+# ============= Creates the library =============
+if(BUILD_SHARED_LIBS) # shared library
+  add_library(${SCALES_LIBRARY_NAME} SHARED ${${SCALES_LIBRARY_NAME}_SRC})
+else() # static library
+  add_library(${SCALES_LIBRARY_NAME} STATIC ${${SCALES_LIBRARY_NAME}_SRC})
+endif()
+# Libs to link with SCALES_LIBRARY_NAME
+target_link_libraries(${SCALES_LIBRARY_NAME} ${SCALESLIBS})
+
+# ============== Add tests ==============
+if(WITH_TESTS)
+  message(STATUS "Enable testing ...")
+endif(WITH_TESTS)
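+
+# Illustrative configure example (hypothetical build directory; BUILD_SHARED_LIBS
+# and WITH_TESTS are the options referenced above):
+#   cmake -DBUILD_SHARED_LIBS=ON -DWITH_TESTS=ON <path/to/scalesInterface>
+#   make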
+
+# ============= Prepare install =============
+
+# The library
+# The library, the headers and mod files, and the cmake-generated files
+# will be installed in CMAKE_INSTALL_PREFIX/lib, include and share
+#include(InstallPackage)
+
+#install_package(${SCALESPACKAGENAME} ${SCALES_LIBRARY_NAME} ${${SCALES_NAME}_HDRS})
+
+#install(TARGETS ${SCALESEXE_NAME}  
+#RUNTIME DESTINATION bin  # executables
+#  )
diff --git a/HySoP/src/scalesInterface/layout/cart_mesh.f90 b/HySoP/src/scalesInterface/layout/cart_mesh.f90
new file mode 100644
index 000000000..98969dacc
--- /dev/null
+++ b/HySoP/src/scalesInterface/layout/cart_mesh.f90
@@ -0,0 +1,127 @@
+!> @addtogroup cart_structure
+!! @{
+
+!-----------------------------------------------------------------------------
+!
+! MODULE: cart_mesh
+!
+!
+! DESCRIPTION: 
+!>  This module provides a mesh structure. It is used for output and as a future
+!! basis for handling several scalar fields, possibly computed with different
+!! resolutions.
+!
+!> @details 
+!!  This module provides a structure to store the mesh context associated with a field.
+!! This makes it easy to work with different resolutions and to know how the
+!! mesh interacts with the mpi topology.
+!! It provides tools to initialise the type to default values or to
+!! auto-complete it.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module cart_mesh
+
+    use precision
+
+    implicit none
+
+    public
+    
+    ! ===== Type =====
+    !> Information about the mesh subdivision and the global grid
+    type cartesian_mesh
+        !> number of grid points in each direction
+        integer, dimension(3)   :: N
+        !> number of grid point for the local subgrid in each direction
+        integer, dimension(3)   :: N_proc
+        !> min and max local indices along each direction
+        integer, dimension(3,2) :: relative_extend
+        !> min and max global indices associated with the current process
+        integer, dimension(3,2) :: absolute_extend
+        !> space step for field discretisation
+        real(WP), dimension(3)  :: d_space
+    end type cartesian_mesh
+        
+
+    ! ===== Public procedures =====
+    ! Auto-complete cartesian_mesh data field.
+    public      :: mesh_save
+    ! Create a cartesian_mesh variable from the data saved in the cart_topology module.
+    public      :: mesh_save_default
+
+
+contains
+
+!> Save data about the cartesian mesh created in the cart_topology module
+!>    @param[out]   mesh    = variable of type cartesian_mesh where the mesh data are saved
+subroutine mesh_save_default(mesh)
+
+    use cart_topology   ! Description of mesh and of mpi topology
+    implicit none
+
+    ! Input/Output
+    type(cartesian_mesh), intent(out)       :: mesh
+    ! Other local variables
+    integer                                 :: direction    ! index corresponding to a direction (X, Y or Z)
+
+    ! Number of mesh
+    mesh%N = N
+    mesh%N_proc = N_proc
+
+    ! Relative extend
+    mesh%relative_extend(:,1) = begin_proc
+    mesh%relative_extend(:,2) = end_proc
+    ! Absolute one
+    do direction = 1, 3
+        mesh%absolute_extend(direction,:) = coord(direction)*N_proc(direction) + mesh%relative_extend(direction,:)
+    end do
+
+    ! Space step
+    mesh%d_space = d_sc
+
+end subroutine mesh_save_default
+
+
+!> Auto-complete some fields of a "cartesian_mesh" variable.
+!>    @param[out]   mesh    = variable of type cartesian_mesh where the mesh data are saved
+!>    @param[in]    Nb      = number of grid points along each direction
+!>    @param[in]    Nb_proc = number of grid points along each direction associated with the current process
+!>    @param[in]    d_space = space step
+!>    @param[in]    coord   = coordinates of the current process in the 3D mpi-topology
+!>    (An illustrative usage sketch is given after this subroutine.)
+subroutine mesh_save(mesh, Nb, Nb_proc, d_space, coord)
+
+    implicit none
+
+    ! Input/Output
+    type(cartesian_mesh), intent(out)       :: mesh
+    integer, dimension(3), intent(in)       :: Nb
+    integer, dimension(3), intent(in)       :: Nb_proc
+    integer, dimension(3), intent(in)       :: coord
+    real(WP), dimension(3), intent(in)      :: d_space
+    ! Other local variables
+    integer                                 :: direction    ! index corresponding to a direction (X, Y or Z)
+
+    ! Number of mesh
+    mesh%N = Nb
+    mesh%N_proc = Nb_proc
+
+    ! Relative extend
+    mesh%relative_extend(:,1) = 1
+    mesh%relative_extend(:,2) = Nb_proc
+    ! Absolute one
+    do direction = 1, 3
+        mesh%absolute_extend(direction,:) = coord(direction)*Nb_proc(direction) + mesh%relative_extend(direction,:)
+    end do
+
+    ! Space step
+    mesh%d_space = d_space
+
+end subroutine mesh_save
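+
+! Illustrative usage sketch (assuming cart_topology has been initialised, so that
+! N, N_proc, d_sc and coord are available for mesh_save_default):
+!    type(cartesian_mesh) :: mesh
+!    call mesh_save_default(mesh)                       ! copy the cart_topology data
+!    call mesh_save(mesh, Nb, Nb_proc, d_space, coord)  ! or fill it explicitly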
+
+
+end module cart_mesh
+!> @}
diff --git a/HySoP/src/scalesInterface/layout/cart_topology.f90 b/HySoP/src/scalesInterface/layout/cart_topology.f90
new file mode 100644
index 000000000..3bf33e570
--- /dev/null
+++ b/HySoP/src/scalesInterface/layout/cart_topology.f90
@@ -0,0 +1,658 @@
+!> @addtogroup cart_structure
+!! @{
+
+!------------------------------------------------------------------------------
+
+!
+! MODULE: cart_topology
+!
+!
+! DESCRIPTION: 
+!>  This module provides a cartesian topology on the parallel layout.
+!
+!> @details 
+!!  This module provides a cartesian topology on the parallel layout.
+!! This virtual topology is created by the MPI procedures (and thus uses
+!! low-level optimisations based on the underlying hardware). It
+!! provides the different tools to create, to manipulate and to interface
+!! it with the other topologies and communicators.
+!! The solver uses dimensional splitting, and this module provides the cartesian
+!! topology and the communicators used by the directional advection solvers
+!! (a parallel implementation using MPI).
+!!
+!!  For now, the domain is only split along the Y and Z axes. Therefore,
+!! we only use a 2D cartesian decomposition.
+!!  A "global" communicator is devoted to the (2D) cartesian structure.
+!! Another communicator is added for each direction in order to deal
+!! with all 1D communications (along Y or Z).
+!! Be careful: the (Y,Z) axes of the 3D mesh map to the (X,Y) axes of the 2D
+!! mpi-topology.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module cart_topology
+
+  use precision
+  use mpi, only : MPI_COMM_WORLD,MPI_TAG_UB
+
+  implicit none
+  
+  ! ----- Communicators -----
+  !> Communicator associated with the cartesian topology
+  integer, protected                  :: cart_comm        
+  !> Communicators devoted to 1-dimensional subgrids (along X, Y and Z)
+  integer, protected                  :: X_comm, Y_comm, Z_comm   
+  !> Table of the previous communicators (ie comm devoted to 1D subgrids)
+  integer, dimension(3), protected    :: D_comm
+  !> Rank of immediate neighbors
+  integer, dimension(3,2), protected  :: neighbors
+  ! ----- Information about mesh subdivision and on the global grid -----
+  !> number of processes in each direction
+  integer, dimension(3), protected    :: nb_proc_dim
+  !> min and max local indices along each direction
+  integer, dimension(3), protected    :: begin_proc, end_proc     
+  !> spatial length of the domain
+  real(WP), dimension(3), protected   :: length
+  !> space step for scalar discretisation
+  real(WP), dimension(3), protected   :: d_sc                     
+  !> number of (sub)grid in each direction
+  integer, dimension(3), protected    :: N
+  integer, dimension(3), protected    :: N_proc
+  !> rank of the current process (in the cartesian communicator)
+  integer, public                     :: cart_rank
+  !> rank of the current process (in the communicators associated with each direction)
+  integer, dimension(3), public       :: D_rank
+  !> coordinates of the current process
+  integer, dimension(3), protected    :: coord
+  !> YZ coordinates of the current process
+  integer, dimension(2), protected    :: coordYZ
+  !> Periodic boundary conditions: logical array, equals true if periodic
+  logical, dimension(3),protected     :: periods                      
+  !> Computations are done by groups of lines. Here we define their size
+  integer, dimension(3,2), protected  :: group_size
+  !> To check if group size is initialized
+  logical, private                    :: group_init = .false.
+  !> To concatenate position in order to create unique mpi message tag
+  integer, dimension(3,2), private    :: tag_size
+  !> To concatenate rank in order to create unique mpi message tag
+  integer, private                    :: tag_rank
+  !> To check if mesh is already initialized
+  logical, private                    :: mesh_init = .false.
+  !> Default mesh resolution
+  integer, parameter                  :: default_size = 80
+
+  ! ==== Public procedures ==== 
+  ! Creation of the cartesian topology
+  public cart_create
+  ! Initialise mesh information (first part)
+  public      :: discretisation_create
+  public      :: discretisation_default
+  ! Compute tag for mpi message
+  public      :: compute_tag
+  private     :: compute_tag_gap
+  private     :: compute_tag_NP
+  ! Adjust some private variables
+  public      :: set_group_size
+  private     :: set_group_size_1
+  private     :: set_group_size_1x2
+  private     :: set_group_size_3
+  private     :: set_group_size_init
+
+  ! ==== Public procedures ==== 
+  ! Initialise mesh information (second part)
+  private     :: discretisation_init
+
+  interface compute_tag
+     module procedure compute_tag_gap, compute_tag_NP
+  end interface compute_tag
+
+  interface set_group_size
+     !>    The size of a group of lines is used to gather lines together in the particle
+     !! solver. As it is a crucial parameter, it must be possible for the user to change
+     !! it without breaking anything (ie a bad user value has to be ignored) and
+     !! it must default to a sensible value. Use
+     !! set_group_size for this purpose. An optional logical argument "init" is
+     !! used to reset group_size to a default acceptable value if the user value
+     !! is not compatible with the mesh size (ie would create bugs).
+     !! (An illustrative usage sketch is given right after this interface.)
+     module procedure set_group_size_1, set_group_size_1x2, set_group_size_3, &
+          & set_group_size_init
+  end interface set_group_size
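+
+  !! Illustrative usage sketch (hypothetical sizes; once the mesh is initialised,
+  !! any size that does not divide the local resolution is simply ignored):
+  !!    call set_group_size(5)          ! groups of 5x5 lines in every direction
+  !!    call set_group_size(5, 2)       ! groups of 5x2 lines in every direction
+  !!    call set_group_size(5, 4, 2)    ! per-direction group sizes (X, Y, Z)
+  !! An optional logical argument "init" can be passed to control the default
+  !! initialisation (see set_group_size_init).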
+
+contains
+
+  !> Creation of the cartesian mpi topology and (if needed) of all communicators
+  !! used for particles method.
+  !!    @param[in]    dims        = array specifying the number of processes in each dimension
+  !!    @param[out]   ierr        = error code
+  !!    @param[out]   spec_comm   = mpi communicator used by the spectral part of the code (optional).
+  !!    @param[in]    topology    = to choose the dimension of the mpi topology (if 0 then none) (optional).
+  !! @details 
+  !!        This subroutine initializes the mpi topology and returns the communicator
+  !!    that will be used for all the spectral part of the code (ie everything except
+  !!    the particle part). If needed, it also initializes all the mpi context
+  !!    used by the particle solver.
+  !!    (An illustrative usage sketch is given after this subroutine.)
+  subroutine cart_create(dims, ierr, spec_comm, topology)
+
+    ! Input/Output
+    integer, dimension(:), intent(in)   :: dims
+    integer, intent(out)                :: ierr
+    integer, optional, intent(out)      :: spec_comm        
+    integer, optional, intent(in)       :: topology
+    ! Other local variables
+    logical                 :: reorganisation                   ! whether mpi is allowed to reorder the process ranks.
+    logical, dimension(3)   :: remains_dim                      ! used to create 1D-subdivisions : remains_dim(i) equals
+    ! true if the i-th dimension is kept in the subgrid.
+    integer                 :: direction                        ! current direction : 1 = along X, 2 = along Y and 3 = along Z
+    integer                 :: topology_dim=3                   ! copy of the optional input "topology".
+    integer                 :: key                              ! to re-order processes in spec_comm
+    integer, dimension(1)   :: nb_proc                          ! total number of processes
+    logical, dimension(1)   :: period_1D = .false.              ! periodicity in case of 1D mpi topology.
+
+    ! If there is a scalar to advect with the particle method, then initialize
+    ! the mpi topology
+    if (present(topology))  then
+       select case (topology)
+       case(0)
+          topology_dim = 0
+       case(1)
+          topology_dim = 1
+       case default
+          topology_dim = 3
+       end select
+    end if
+
+    select case (topology_dim)
+    case (3)
+       ! ===== Create the 3D mpi topology (effectively a 2D decomposition when not split along X) =====
+       ! The topology is created and the mpi context is initialized for both
+       ! the spectral and the particle code
+
+       ! --- Creation of the cartesian topology ---
+       reorganisation = .true.
+       periods = .true.
+       if (size(dims)==2) then 
+          nb_proc_dim = (/ 1, dims(1), dims(2) /)
+       else if (size(dims)==3) then
+          nb_proc_dim = dims
+          if (nb_proc_dim(1)/=1) then
+             call mpi_comm_rank(MPI_COMM_WORLD, cart_rank, ierr)
+             if (cart_rank==0) write(*,'(a)') ' XXXXXXXXXX Warning: subdivision along X XXXXXXXXXX'
+          end if
+       else
+          call mpi_comm_rank(MPI_COMM_WORLD, cart_rank, ierr)
+          if (cart_rank==0) then
+             write(*,'(a)') ' XXXXXXXXXX Error - wrong number of processes XXXXXXXXXX'
+             write(*,'(a,10(x,i0))') ' input argument dims =', dims
+          end if
+          stop
+       end if
+
+       call mpi_cart_create(MPI_COMM_WORLD, 3, nb_proc_dim, periods, reorganisation, &
+            & cart_comm, ierr)
+
+       ! --- Create 1D communicator ---
+       ! Subdivision in 1D-subgrids and creation of communicator devoted to
+       ! 1D-communication
+       ! Communication along X-axis 
+       remains_dim = (/.true., .false., .false. /)
+       call mpi_cart_sub(cart_comm, remains_dim, X_comm, ierr)
+       D_comm(1) = X_comm
+       ! Communication along Y-axis (in the 3D mesh, ie the x-axis on the mpi-topology)
+       remains_dim = (/.false., .true., .false. /)
+       call mpi_cart_sub(cart_comm, remains_dim, Y_comm, ierr)
+       D_comm(2) = Y_comm
+       ! Communication along Z-axis 
+       remains_dim = (/ .false., .false., .true. /)
+       call mpi_cart_sub(cart_comm, remains_dim, Z_comm, ierr)
+       D_comm(3) = Z_comm
+
+       ! --- Initialise information about the current processus ---
+       call mpi_comm_rank(cart_comm, cart_rank, ierr)
+       do direction = 1, 3
+          call mpi_comm_rank(D_comm(direction), D_rank(direction), ierr)
+          call mpi_cart_shift(D_comm(direction), 0, 1, neighbors(direction,1), neighbors(direction,2), ierr)
+       end do
+       call mpi_cart_coords(cart_comm, cart_rank, 3, coord, ierr)
+       coordYZ = (/ coord(2), coord(3) /)
+
+       ! --- Spectral context ---
+       ! Initialized the communicator used on which the spectral part
+       ! will be based.
+       if (present(spec_comm)) then
+          !> Rank numbering in the spectral communicator grows along the first
+          !! direction first and then along the second, the opposite of the mpi
+          !! rank numbering. That is why processes are reordered and a dedicated
+          !! communicator is created.
+          !! Example with 4 processes
+          !! coord    // mpi-cart rank    // spec rank
+          !! (0,0,0)  // 0                // 0
+          !! (0,1,0)  // 2                // 1
+          !! (0,0,1)  // 1                // 2
+          !! (0,1,1)  // 3                // 3
+          ! Construct the key used to reorder
+          key = coord(1) + (coord(2) + coord(3)*nb_proc_dim(2))*nb_proc_dim(1)
+          ! Since there is no split along X, this is equivalent to "key = coord(2) + coord(3)*nb_proc_dim(2)"
+          ! Construct spectral communicator
+          call mpi_comm_split(cart_comm, 1, key, spec_comm, ierr)
+       end if
+
+    case (1)
+       ! Construct a 1D non-periodic mpi topology
+       reorganisation = .true.
+       nb_proc = product(nb_proc_dim)
+       call mpi_cart_create(MPI_COMM_WORLD, 1, nb_proc, period_1D, reorganisation, &
+            & cart_comm, ierr)
+       ! Use it as spectral communicator (if requested).
+       if (present(spec_comm)) spec_comm = cart_comm
+
+    case default
+       ! ===== Do not use mpi topology =====
+       if (present(spec_comm)) then
+          spec_comm = MPI_COMM_WORLD
+       end if
+    end select
+
+
+    ! Print some minimal information about the topology
+    if (cart_rank == 0) then
+       write(*,'(a)') ''
+       write(*,'(6x,a)') '========== Topology used ========='
+       if (topology_dim == 0) then
+          write(*,'(6x,a)') 'No mpi topology'
+       else
+          write(*,'(6x,i0,a)') topology_dim,'D mpi topology'
+       end if
+       write(*,'(6x,a,i0,x,i0,x,i0)') 'nb of proc along X, Y, Z = ', nb_proc_dim
+       write(*,'(6x,a)') '=================================='
+       write(*,'(a)') ''
+    end if
+
+  end subroutine cart_create
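+
+  ! Illustrative usage sketch (hypothetical values; dims may hold 2 or 3 entries):
+  !    integer :: ierr, spec_comm
+  !    call cart_create((/ 2, 2 /), ierr, spec_comm)      ! 1x2x2 process grid
+  !    call cart_create((/ 1, 2, 2 /), ierr, spec_comm)   ! same grid, explicit 3D dims
+  ! The optional 'topology' argument selects a 1D topology (1) or no topology (0).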
+
+  !> Create the mesh structure associated to the topology 
+  !!    @param[in]    resolution   = number of mesh points along each direction (X, Y, Z)
+  !!    @param[in]    domainLength = length of the domain along each direction (X, Y, Z)
+  !! @details
+  !!    Initialise the mesh data associated to the mpi topology and used by the
+  !!    particle solver
+  !!    @author Jean-Baptiste Lagaert
+  subroutine discretisation_create(resolution,domainLength)
+
+    ! Input/Output
+    integer, dimension(3),intent(in)     :: resolution
+    real(WP), dimension(3),intent(in)    :: domainLength
+
+    ! Set the global resolution and domain length from the given arguments.
+
+    N = resolution
+    length = domainLength
+
+    N_proc = N / nb_proc_dim
+    begin_proc = 1
+    end_proc = N_proc
+
+    ! Adjust group size :
+    call set_group_size_init()
+    ! Finish init
+    mesh_init = .false.
+    call discretisation_init()
+
+  end subroutine discretisation_create
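+
+  ! Illustrative usage sketch (hypothetical values; to be called after cart_create,
+  ! and the resolution should be divisible by nb_proc_dim in each direction):
+  !    call discretisation_create((/ 128, 64, 64 /), (/ 2._WP, 1._WP, 1._WP /))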
+
+  !> Default mesh setup
+  !!    @author Jean-Baptiste Lagaert
+  !! @details
+  !!    Initialise the mesh data associated to the mpi topology and used by the
+  !!    particle solver to a default grid of default_size (80) points in each direction.
+  subroutine discretisation_default()
+
+    ! A cubic geometry : unit length and default_size mesh points in each direction.
+    N = default_size
+    length = 1.
+    N_proc = N / nb_proc_dim
+    begin_proc = 1
+    end_proc = N_proc
+
+    group_init = .false.
+    call set_group_size_init()
+    mesh_init = .false.
+    call discretisation_init()
+
+  end subroutine discretisation_default
+
+  !> To initialize some hidden mesh parameters
+  !!    @author Jean-Baptiste Lagaert
+  !! @details
+  !!        In order to deal properly with the mpi topology, the data structure and the
+  !!    mesh decomposition, some other parameters have to be initialised. Some are parameters
+  !!    that cannot be chosen by the user (eg the space step, which depends on the
+  !!    domain size and the number of mesh points) and others are "hidden" parameters used
+  !!    to avoid communication errors or to allow some optimisations. For example, this
+  !!    includes the variables used to create unique tags for the different mpi communications,
+  !!    to gather lines into groups and to index these groups.
+  !!    (A small worked example is given after this routine.)
+  subroutine discretisation_init()
+
+    integer                 :: direction    ! direction (along X = 1, along Y = 2, along Z = 3)
+    integer                 :: group_dir    ! direction "bis"
+    integer, dimension(3,2) :: N_group      ! number of groups on one process along one direction
+
+    d_sc = length/(N)
+
+    ! Compute the number of groups
+    ! Groups of lines along X
+    N_group(1,1) = N_proc(2)/group_size(1,1)
+    N_group(1,2) = N_proc(3)/group_size(1,2)
+    ! Groups of lines along Y
+    N_group(2,1) = N_proc(1)/group_size(2,1)
+    N_group(2,2) = N_proc(3)/group_size(2,2)
+    ! Groups of lines along Z
+    N_group(3,1) = N_proc(1)/group_size(3,1)
+    N_group(3,2) = N_proc(2)/group_size(3,2)
+    ! But not everything is done by groups !!
+    ! Groups of lines along X
+    N_group(1,1) = N_proc(2)
+    N_group(1,2) = N_proc(3)
+    ! Groups of lines along Y
+    N_group(2,1) = N_proc(1)
+    N_group(2,2) = N_proc(3)
+    ! Groups of lines along Z
+    N_group(3,1) = N_proc(1)
+    N_group(3,2) = N_proc(2)
+
+    ! tag_size = number of digits needed so that 10**tag_size is larger than the max group index
+    do direction = 1,3
+       tag_size(direction,:) = 1
+       do group_dir = 1,2
+          do while (N_group(direction, group_dir)/(10**tag_size(direction, group_dir))>1)
+             tag_size(direction, group_dir) = tag_size(direction, group_dir)+1 
+          end do
+       end do
+    end do
+
+    tag_rank = 1
+    do while(3*max(nb_proc_dim(1),nb_proc_dim(2),nb_proc_dim(3))/(10**tag_rank)>=1)
+       tag_rank = tag_rank+1
+    end do
+    if (tag_rank == 1) tag_rank = 2
+
+    ! Print some information about mesh used
+    if(cart_rank==0) then
+       write(*,'(a)') ''
+       if(mesh_init) then 
+          write(*,'(6x,a,a24,a)') 'XXXXXX','group size changed ','XXXXXX'
+       else
+          write(*,'(6x,a,a30,a)') '-- ','mesh size',' --'
+          write(*,'(6x,a,3(x,i0))') 'global size =',N
+          write(*,'(6x,a,3(x,i0))') 'local size =',N_proc
+       end if
+       write(*,'(6x,a,2(x,i0))') 'group size along X =',group_size(1,:)
+       write(*,'(6x,a,2(x,i0))') 'group size along Y =',group_size(2,:)
+       write(*,'(6x,a,2(x,i0))') 'group size along Z =',group_size(3,:)
+       write(*,'(6x,a)') '-- initialisation: tag generation --'
+       do direction = 1,3
+          write(*,'(6x,a,i0,a,i0,x,i0)') 'tag_size(',direction,',:) = ', tag_size(direction,:)
+       end do
+       write(*,'(6x,a,i0)') 'tag_rank = ', tag_rank
+       write(*,'(6x,a)') '------------------------------------'
+       write(*,'(a)') ''
+    end if
+
+    mesh_init = .true.
+
+  end subroutine discretisation_init
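+
+  ! Worked example (illustrative, assuming N_proc = 80 in every direction and
+  ! nb_proc_dim = (/ 1, 2, 2 /)): N_group = 80 in every entry, so tag_size = 2
+  ! (80/10**1 > 1 but 80/10**2 is not), and 3*max(nb_proc_dim) = 6 < 10, so
+  ! tag_rank is first left at 1 and then raised to its minimum value of 2.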
+
+  !> Compute a unique tag for an mpi message by concatenating the position (ie line coordinates), proc_gap and a unique Id
+  !!    @param[in]    ind_group   = indices of the current group of lines
+  !!    @param[in]    tag_param   = couple of integers unique for each message (used to create the tag)
+  !!    @param[in]    direction   = current direction
+  !!    @param[in]    proc_gap    = number of processes between the sender and the receiver
+  !!    @return       tag         = unique tag: each message sent during an iteration has a different tag
+  !!@details 
+  !!     Use this procedure to compute a tag in order to communicate with a distant process and/or when
+  !!    more than two messages will be sent. It produces longer tags than compute_tag_NP because, rather than using 0/1, it 
+  !!    puts the gap between the sender and the receiver (ie the number of processes between them) in the tag.
+  !!    Using these two procedures provides more unique tags for communication.
+  !!    (A small worked example is given after compute_tag_NP below.)
+  function compute_tag_gap(ind_group, tag_param, direction,proc_gap) result(tag)
+
+    ! Returned variable
+    integer                             :: tag
+    ! Input/Ouput
+    integer, dimension(2), intent(in)   :: ind_group
+    integer, dimension(2), intent(in)   :: tag_param
+    integer, intent(in)                 :: direction
+    integer, intent(in)                 :: proc_gap
+    ! Other local variables
+    integer                              :: abs_proc_gap ! absolute value of proc_gap
+
+    abs_proc_gap = max(abs(proc_gap),1)
+    tag = (tag_param(1)*10+direction)*(10**(tag_rank+1))
+    if (proc_gap>=0) then
+       tag = tag + proc_gap*10 
+    else
+       tag = tag - proc_gap*10 +1
+    end if
+    tag = (tag*(10**tag_size(direction,1)))+(ind_group(1)-1)
+    tag = ((tag*(10**tag_size(direction,2)))+(ind_group(2)-1))
+    tag = (tag*10)+tag_param(2)
+
+    ! The tag cannot be too big (it must be a valid integer and smaller than the
+    ! maximum mpi tag)
+    if ((tag<0).or.(tag>MPI_TAG_UB))  then
+       !print*, 'tag too big - regenerated'
+       tag = (tag_param(1))*(10**(tag_rank+1))
+       if (proc_gap>=0) then
+          tag = tag + proc_gap*10 
+       else
+          tag = tag - proc_gap*10 +1
+       end if
+       tag = tag*(10**tag_size(direction,1))+(ind_group(1)-1)
+       tag = ((tag*(10**tag_size(direction,2)))+(ind_group(2)-1))
+       tag = (tag*10)+tag_param(2)
+       if ((tag<0).or.(tag>MPI_TAG_UB))  then
+          !print*, 'tag very too big - regenerated'
+          tag = (tag_param(1))*(10**(tag_rank+1))
+          if (proc_gap>=0) then
+             tag = tag + proc_gap*10 
+          else
+             tag = tag - proc_gap*10 +1
+          end if
+          tag = (tag*10)+tag_param(2)
+          if ((tag<0).or.(tag>MPI_TAG_UB))  then
+             tag = tag_param(1)*10 + tag_param(2)
+             if (proc_gap<0) tag = tag +100
+             !print*, 'rank = ', cart_rank, ' coord = ', coord
+             !print*, 'ind_group = ', ind_group, ' ; tag_param = ', tag_param
+             !print*, 'direction = ', direction, ' gap = ', proc_gap ,' and tag = ', tag
+          end if
+       end if
+    end if
+    ! XXX End of debugging aid XXX
+
+  end function compute_tag_gap
+
+
+  !> Compute unique tags for mpi messages by concatenating the position (ie line coordinates), +1 or -1 and a unique Id
+  !!    @param[in]    ind_group   = indices of the current group of lines
+  !!    @param[in]    tag_param   = couple of integers unique for each message (used to create the tag)
+  !!    @param[in]    direction   = current direction
+  !!    @return       tag_table   = unique couple of tags: use tag_table(1) for the message to the previous proc. (or the first 
+  !!                                message) and tag_table(2) for the other message.
+  !!@details 
+  !!     Use this procedure to compute tags for communication with your neighbours or when only two messages are sent:
+  !!    it produces smaller tags than compute_tag_gap because the gap between sender and receiver is replaced by 1,
+  !!    for communication with the previous process (or the first of the two messages), or 0, for communication with the next 
+  !!    process (or the second message). This allows some unique Ids to be reused.
+  function compute_tag_NP(ind_group, tag_param, direction) result (tag_table)
+
+    ! Returned variable
+    integer, dimension(2)               :: tag_table
+    ! Input/Ouput
+    integer, dimension(2), intent(in)   :: ind_group
+    integer, dimension(2), intent(in)   :: tag_param
+    integer, intent(in)                 :: direction
+
+    tag_table(2) = (tag_param(1)*10+direction)*10
+    tag_table(1) = tag_table(2)
+
+    tag_table(2) = tag_table(2) +1
+
+    tag_table(2) = (tag_table(2)*(10**tag_size(direction,1)))+(ind_group(1)-1)
+    tag_table(1) = (tag_table(1)*(10**tag_size(direction,1)))+(ind_group(1)-1)
+
+    tag_table(2) = ((tag_table(2)*(10**tag_size(direction,2)))+(ind_group(2)-1))
+    tag_table(1) = ((tag_table(1)*(10**tag_size(direction,2)))+(ind_group(2)-1))
+
+    tag_table(2) = (tag_table(2)*10)+tag_param(2)
+    tag_table(1) = (tag_table(1)*10)+tag_param(2)
+
+    ! Check if tag limitations are respected.
+    if ((minval(tag_table)<0).or.(maxval(tag_table)>MPI_TAG_UB))  then
+       tag_table = tag_param(1)*100 + tag_param(2)*10
+       tag_table = tag_table + (/1,2/)
+       !print*, 'rank = ', cart_rank, ' coord = ', coord
+       !print*, 'ind_group = ', ind_group, ' ; tag_param = ', tag_param
+       !print*, 'direction = ', direction, ' and tag = ', tag_table
+    end if
+
+
+  end function compute_tag_NP
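+
+  ! Worked example (illustrative, assuming tag_rank = 2 and tag_size(1,:) = (/ 2, 2 /),
+  ! with ind_group = (/ 1, 1 /), tag_param = (/ 1, 2 /) and direction = 1):
+  !    compute_tag_gap(..., proc_gap=1) builds 11000 -> 11010 -> 1101000 -> 110100000
+  !    and finally returns 1101000002;
+  !    compute_tag_NP(...) returns tag_table = (/ 11000002, 11100002 /).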
+
+
+  !> Adjust the private variable "group_size": line are gathering on group of same
+  !! size undependant from the direction
+  !!    @param[in]    s       =  integer such as group will gather sxs lines
+  !!    @param[in]    init    =  logical to said if it is a default init of group_size
+  !! @details
+  !!    Create group of line s x s along the three direction.
+  subroutine set_group_size_1(s, init)
+
+    integer, intent(in)             :: s
+    logical, intent(in), optional   :: init
+
+    if (.not.mesh_init) then
+       group_size = s
+       ! And now group size is initialized !
+       group_init = .true.
+    else
+       if (all(mod(N_proc,s)==0)) group_size = s
+       call discretisation_init()
+    end if
+
+    if (present(init)) call set_group_size_init(init)
+
+  end subroutine set_group_size_1
+
+
+  !> Adjust the private variable "group_size": line are gathering on group of same
+  !! size undependant from the direction
+  !!    @param[in]    s1      =  integer such as group will gather s1 line along first remaining direction
+  !!    @param[in]    s2      =  integer such as group will gather s1 line along second remaining direction
+  !!    @param[in]    init    =  logical to said if it is a default init of group_size
+  !! @details
+  !!    Created group will gather s1 x s2 lines
+  subroutine set_group_size_1x2(s1, s2, init)
+
+    integer, intent(in)             :: s1, s2
+    logical, intent(in), optional   :: init
+
+    if (.not. mesh_init) then
+       group_size(:,1) = s1
+       group_size(:,2) = s2
+       ! And now group size is initialized !
+       group_init = .true.
+    else
+       if (all(mod(N_proc,s1)==0)) group_size(:,1) = s1
+       if (all(mod(N_proc,s2)==0)) group_size(:,2) = s2
+       call discretisation_init()
+    end if
+
+    if (present(init)) call set_group_size_init(init)
+
+  end subroutine set_group_size_1x2
+
+
+  !> Adjust the private variable "group_size": line are gathering on group of a
+  !! size depending of the current direction.
+  !!    @param[in]    sX =  integer such as group of lines along X will gather sX x sX lines
+  !!    @param[in]    sY =  integer such as group of lines along Y will gather sY x sY lines
+  !!    @param[in]    sZ =  integer such as group of lines along Z will gather sZ x sX lines
+  !!    @param[in]    init    =  logical to said if it is a default init of group_size
+  subroutine set_group_size_3(sX, sY, sZ, init)
+
+    integer, intent(in)             :: sX, sY, sZ
+    logical, intent(in), optional   :: init
+
+    if (.not.mesh_init) then
+       group_size(1,:) = sX
+       group_size(2,:) = sY
+       group_size(3,:) = sZ
+       ! And now group size is initialized !
+       group_init = .true.
+    else
+       if (all(mod(N_proc(2:3),sX)==0)) group_size(1,:) = sX
+       if ((mod(N_proc(1),sY)==0).and.(mod(N_proc(3),sY)==0)) group_size(2,:) = sY
+       if ((mod(N_proc(1),sZ)==0).and.(mod(N_proc(2),sZ)==0)) group_size(3,:) = sZ
+       call discretisation_init()
+    end if
+
+    if (present(init)) call set_group_size_init(init)
+
+
+  end subroutine set_group_size_3
+
+  !> Adjust the private variable "group_size": line are gathering on group of same
+  !! size undependant from the direction
+  !!    @param[in]    init    =  logical to said if it is a default init of group_size
+  !! @details
+  !!    Create group of acceptable default size (or re-init group size if optional
+  !! argument "init" is present and set to true).
+  subroutine set_group_size_init(init)
+
+    logical, intent(in), optional   :: init
+
+    ! To check if group size is well defined
+    integer, dimension(3,2)         :: domain_size
+
+    if (present(init)) group_init = init
+
+    if (.not.group_init) then
+       ! Setup the size of line group to a default value
+       if (all(mod(N_proc,5)==0)) then
+          group_size = 5
+       else if (all(mod(N_proc,4)==0)) then
+          group_size = 4
+       else if (all(mod(N_proc,2)==0)) then
+          group_size = 2
+       else
+          group_size = 1
+       end if
+       ! And now group size is initialized !
+       group_init = .true.
+    else
+       domain_size(1,:) = (/N_proc(2), N_proc(3)/)
+       domain_size(2,:) = (/N_proc(1), N_proc(3)/)
+       domain_size(3,:) = (/N_proc(1), N_proc(2)/)
+
+       where (mod(domain_size,group_size)/=0)
+          where(mod(domain_size,5)==0)
+             group_size=5
+          elsewhere(mod(domain_size,4)==0)
+             group_size=4
+          elsewhere(mod(domain_size,2)==0)
+             group_size=2
+          elsewhere
+             group_size=1
+          end where
+       end where
+    end if
+
+  end subroutine set_group_size_init
+
+end module cart_topology
+!> @}
diff --git a/HySoP/src/scalesInterface/output/parallel_io.f90 b/HySoP/src/scalesInterface/output/parallel_io.f90
new file mode 100644
index 000000000..73ff9f2bb
--- /dev/null
+++ b/HySoP/src/scalesInterface/output/parallel_io.f90
@@ -0,0 +1,433 @@
+!------------------------------------------------------------------------------
+!
+! MODULE: parallel_io
+!
+!
+! DESCRIPTION: 
+!> This module provides all the procedures needed to perform parallel and distributed
+!! output in vtk format.
+!
+!! @details
+!!        This module provides tools to write distributed output. This means that
+!!    the output of one field is written to one file per (mpi) process. This allows
+!!    fields computed at high resolution to be written without being limited by the
+!!    size of the output file, and avoids very long loading times during visualisation
+!!    or when reading files back in order to initialize a computation from a given setup.
+!
+!!         This first version provides only output tools. Some input procedures could
+!!    be added in future work. The general context (number of fields to save, physical 
+!!    dimensions, number of processes, ...) is initialized by calling "parallel_io_init_all";
+!!    the context specific to each field (mesh resolution, number of points, name of
+!!    the output, information about the time sequence, ...) is initialized or saved by
+!!    calling "parallel_io_init_field". After that, call "parallel_write" in order to
+!!    create a new output of the field.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
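+
+! Illustrative usage sketch (hypothetical values; nb_proc_3D, length_3D, rank_3D and
+! coord_3D come from the mpi topology; the exact interfaces of parallel_io_init_field
+! and parallel_write are defined further below in this module):
+!    call parallel_io_init_all(2, nb_proc_3D, length_3D, rank_3D, coord_3D, './save/')
+!    call parallel_io_init_field(...)   ! once per field to output
+!    call parallel_write(...)           ! once per output of a field
+!    ... and call parallel_io_finish at the end to free the io context.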
+
+!> @addtogroup output
+!! @{
+module parallel_io
+
+    use precision
+    use cart_mesh
+
+    implicit none
+
+
+    interface parallel_write
+        !module procedure parallel_vect3D, parallel_scalar
+        module procedure parallel_scalar
+    end interface parallel_write
+
+
+    ! ===== Parameter about name size =====
+    !> Size of character string for the name of each field (name of output will be longer)
+    integer, private, parameter :: short_name = 12
+    !> Number of characters used to write the index of an output
+    integer, private, parameter :: size_ite = 3
+    !> Number of characters used to write the rank of the process
+    integer, private            :: size_rank = 3
+    !> Size of character string corresponding to the final output name
+    integer, private            :: long_name = short_name + size_ite + 8
+
+    ! ===== Type =====
+    !> Information about the data to save
+    type io_data
+        !> field name
+        character (len=short_name)              :: f_name
+        !> index of the current output - used to construct the time sequence.
+        integer                                 :: iteration
+        !> mesh information
+        type(cartesian_mesh)                    :: mesh
+        !> piece extent (for the distributed vtk xml format)
+        integer, dimension(:,:,:), allocatable  :: piece_extend
+    end type io_data
+
+    ! ===== Public procedures =====
+    !> Generic procedure to write a field into distributed vtk files
+    public                  :: parallel_write
+    ! Initialisation of the mesh context
+    public                  :: parallel_io_init_all
+    public                  :: parallel_io_init_field
+    ! Destruct the io context and free memory
+    public                  :: parallel_io_finish
+
+
+    ! ===== Private procedures =====
+    ! Specifique procedure to write field in distribued vtk files
+    !private                 :: parallel_vect3D
+    private                 :: parallel_scalar
+    ! To search the right tag if the input one does not match to the field name.
+    private                 :: search_io_tag
+
+
+    ! ===== Private variables =====
+    !> number of processes in each direction
+    integer, dimension(3), private                      :: nb_proc
+    !> total number of processes
+    integer, private                                    :: total_nb_proc
+    !> spatial length of the domain
+    real(WP), dimension(3), private                     :: domain_length
+    !> coordinates of the current process
+    integer, dimension(3), private                      :: coord3D
+    !> number of different fields to save
+    integer, private                                    :: field_number = 0
+    !> number of fields for which io_data are already initialized
+    integer, private                                    :: field_initialized
+    !> io_data for each field to save
+    type(io_data), dimension(:), allocatable, private   :: field_data
+    !> rank of the current process (in the 3D cartesian communicator)
+    integer, private                                    :: rank3D
+    !> write format for the name of the *.pvti output (data-free file listing the distributed pieces)
+    character (len=100), private                        :: format_no_rank
+    !> write format for the name of the *.vti output (distributed file containing the data)
+    character (len=100), private                        :: format_rank
+    !> write format for the data (ie array output)
+    character (len=100), private                        :: format_data
+    !> directory where data are saved
+    character (len=50), private                         :: dir = './save/'
+    !> smallest value that can be written
+    real(WP), parameter, private                        :: minValue = 1e-9
+    !> Format of output (ascii or binary)
+    logical, private                                    :: binary = .false.
+
+    contains
+
+!> Initialize the general io context
+!!    @param[in]    nb_field    = number of different fields to save
+!!    @param[in]    nb_proc_3D  = number of processes along each direction
+!!    @param[in]    length_3D   = size of the (physical or possibly spectral) domain
+!!    @param[in]    rank_3D     = rank of the current process in the 3D mpi-topology
+!!    @param[in]    coord_3D    = coordinates of the current process in the 3D mpi-topology
+!!    @param[in]    dir_name    = (optional) name of the directory where the output has to be written
+subroutine parallel_io_init_all(nb_field, nb_proc_3D, length_3D, rank_3D, coord_3D, dir_name)
+
+    implicit none
+
+    integer, intent(in)                     :: nb_field, rank_3D
+    integer, dimension(3), intent(in)       :: nb_proc_3D, coord_3D
+    real(WP), dimension(3), intent(in)      :: length_3D
+    character (len=*), intent(in), optional :: dir_name
+
+    
+    ! Update save directory
+    if (present(dir_name)) dir = trim(dir_name)
+    
+    ! Check if no output context is already initialized
+    if (field_number/=0) stop 'output context already initialized'
+
+    ! copy data
+    field_number    = nb_field
+    nb_proc         = nb_proc_3D
+    domain_length   = length_3D
+    rank3D          = rank_3D
+    coord3D         = coord_3D
+
+    ! Allocate field_data
+    allocate(field_data(field_number))
+    field_initialized = 0
+    
+    ! Compute "size_rank"
+    total_nb_proc = nb_proc(1)*nb_proc(2)*nb_proc(3)
+    size_rank = 1
+    do while (total_nb_proc/(10**size_rank)>1)
+        size_rank = size_rank + 1
+    end do
+    long_name = long_name + size_rank + len(trim(dir))
+
+    ! Init format for output file name
+    write(format_no_rank,'(a,i0,a1,i0,a)') '(a,a1,i',size_ite,'.',size_ite,',a)'
+    write(format_rank,'(a,i0,a1,i0,a,i0,a1,i0,a)') '(a,a2,i',size_rank,'.',size_rank, &
+            & ',a1,i',size_ite,'.',size_ite,',a)'
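+    ! Example (illustrative): with size_rank = 3 (e.g. a run on a few hundred
+    ! MPI processes) and size_ite = 3, format_rank expands to
+    ! '(a,a2,i3.3,a1,i3.3,a)', so a field named "scal" written by rank 5 at
+    ! output 12 is stored as "scal_p005-012.vti", with the companion header
+    ! file "scal-012.pvti" (see output_name below).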
+
+end subroutine parallel_io_init_all
+
+
+!> Initialize the context associated to a given field
+!!    @param[in]    f_name      = name of the field (as it will appear in the output)
+!!    @param[out]   tag         = tag used to identify the field among the others when calling "parallel_write"
+!!    @param[in]    mesh        = mesh data associated to the current field (optional, if not present the default one is used)
+subroutine parallel_io_init_field(f_name, tag, mesh)
+
+    use mpi
+    use cart_topology
+
+    implicit none
+
+    character (len=*), intent(in)               :: f_name
+    type(cartesian_mesh), intent(in), optional  :: mesh
+    integer, intent(out)                        :: tag
+
+    integer                                     :: rank         ! mpi process rank
+    integer, dimension(3)                       :: coord3D      ! coordinates of the process in the 3D mpi topology
+    integer                                     :: ierr         ! mpi error code
+    integer                                     :: direction
+
+    if (field_initialized<field_number) then 
+        field_initialized = field_initialized + 1
+    else
+        stop 'more fields to save than announced when the io context was built (vtk parallel output)'
+    end if
+
+    tag = field_initialized
+
+    ! Save io_data for the field
+    field_data(tag)%f_name = f_name
+    field_data(tag)%iteration  = 0
+    if(present(mesh)) then
+        field_data(tag)%mesh = mesh
+    else
+        call mesh_save_default(field_data(tag)%mesh)
+    end if
+
+    ! Compute the piece extent of each process
+    allocate(field_data(tag)%piece_extend(0:total_nb_proc-1,3,2))
+    do rank = 0, total_nb_proc-1
+        call mpi_cart_coords(cart_comm, rank, 3, coord3D, ierr)
+        do direction=1, 3
+            field_data(tag)%piece_extend(rank,direction,1)=coord3D(direction)*field_data(tag)%mesh%N_proc(direction)+1
+            if (coord3D(direction)<nb_proc(direction)-1) then   ! coordinates of this rank, not of the calling process
+                field_data(tag)%piece_extend(rank,direction,2)=(coord3D(direction)+1)*field_data(tag)%mesh%N_proc(direction)+1
+            else
+                field_data(tag)%piece_extend(rank,direction,2)=(coord3D(direction)+1)*field_data(tag)%mesh%N_proc(direction)
+            end if
+        end do
+    end do
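+    ! Example (illustrative): with N_proc(direction) = 32 and 2 processes along
+    ! that direction, the pieces get the extents [1,33] and [33,64]; neighbouring
+    ! pieces share one plane of points and together cover the whole extent.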
+
+end subroutine parallel_io_init_field
+
+
+!> Destroy the io context and free memory.
+subroutine parallel_io_finish()
+
+    integer     :: tag      ! field identifier
+
+    ! Free memory
+    do tag = 1, field_initialized
+        deallocate(field_data(tag)%piece_extend)
+    end do
+    deallocate(field_data)
+
+    field_number = 0
+
+end subroutine parallel_io_finish
+
+
+! ===========================================================
+! ==================== Private procedures ===================
+! ===========================================================
+
+!> Write an output for a one-component field (eg a scalar)
+!!    @param[in,out]    tag     = tag associated to the field to save (ie matching index in the table "field_data")
+!!    @param[in]        values  = field values - the values to write in the file
+!!    @param[in]        f_name  = name of the field (optional, redundant with tag, can be used to check it)
+subroutine parallel_scalar(tag, values, f_name)
+
+    integer, intent(inout)                              :: tag
+    character (len=*), intent(in), optional             :: f_name
+    real(WP), dimension(:,:,:), intent(in)              :: values
+    
+    character (len=long_name+size_rank)                 :: file_name    ! output name
+    integer                                             :: k, j         ! some array indices
+    character (len=short_name)                          :: f_name_bis
+    
+    ! Check tag for output
+    if (present(f_name)) then
+        f_name_bis = trim(f_name)
+        if (field_data(tag)%f_name /= f_name_bis) tag = search_io_tag(f_name_bis)
+    end if
+
+    ! XXX TODO OR NOT ? Check mesh data ? XXX
+
+    ! ===== Write data ===== 
+
+    ! Write the parallel header file which describes the data distribution (but contains no data)
+    if (rank3D==0) call parallel_file_description(tag)
+
+    ! Write local file
+    ! Open file
+    file_name = trim(dir)//trim(output_name(tag, rank3D))
+    if (binary) then
+        !open(unit = 44, file=file_name, status='new', access='stream')
+        open(unit = 44, file=file_name, status='new', form='formatted')
+    else
+        open(unit = 44, file=file_name, status='new', form='formatted')
+    end if
+    ! XXX TODO : the file is opened in binary mode, but when characters are
+    ! written they still come out as plain ascii !!!
+    ! Write header
+    write(44, '(a21)') '<?xml version="1.0"?>'
+    write(44, '(a)') '<VTKFile type="ImageData" version="0.1" byte_order="LittleEndian">'
+    write(44,'(2x,a,i0,x,i0,x,i0,x,i0,x,i0,x,i0,a)') '<ImageData WholeExtent="', &
+        & field_data(tag)%piece_extend(rank3D,1,1), &
+        & field_data(tag)%piece_extend(rank3D,1,2), &
+        & field_data(tag)%piece_extend(rank3D,2,1), &
+        & field_data(tag)%piece_extend(rank3D,2,2), &
+        & field_data(tag)%piece_extend(rank3D,3,1), &
+        & field_data(tag)%piece_extend(rank3D,3,2),'"'
+    write(44, '(4x,a)') 'Origin="0 0 0"'
+    write(44, '(4x,a,f6.4,x,f6.4,x,f6.4,a)') 'Spacing="', field_data(tag)%mesh%d_space(1), &
+        & field_data(tag)%mesh%d_space(2), field_data(tag)%mesh%d_space(3), '">'
+    write(44, '(4x,a,i0,x,i0,x,i0,x,i0,x,i0,x,i0,a)') '<Piece Extent="', &
+        & field_data(tag)%mesh%absolute_extend(1,1), &
+        & field_data(tag)%mesh%absolute_extend(1,2), &
+        & field_data(tag)%mesh%absolute_extend(2,1), &
+        & field_data(tag)%mesh%absolute_extend(2,2), &
+        & field_data(tag)%mesh%absolute_extend(3,1), &
+        & field_data(tag)%mesh%absolute_extend(3,2),'">'
+
+    ! Write data
+    ! Information about field
+    write(44,'(6x,a,a,a)') '<PointData Scalars="', trim(field_data(tag)%f_name),'">'
+    if (binary) then
+        write(44,'(8x,a,a,a)') '<DataArray type="Float64" Name="', &
+                & trim(field_data(tag)%f_name),'" NumberOfComponents="1" format="binary">'
+        ! Data in binary format
+        ! XXX TODO write(44, ')
+    else
+        write(44,'(8x,a,a,a)') '<DataArray type="Float64" Name="', &
+                & trim(field_data(tag)%f_name),'" NumberOfComponents="1" format="ascii">'
+        ! Data in ascii format - one record per (j,k) line of the local slab
+        write(format_data,'(a,i0,a)') '(',field_data(tag)%mesh%N_proc(1), '(f15.5,x))'
+
+        do k = 1, field_data(tag)%mesh%N_proc(3)
+            do j = 1, field_data(tag)%mesh%N_proc(2)
+                write(44, format_data) values(:,j,k)
+            end do
+        end do
+    end if
+
+    ! Close environment
+    write(44,'(8x,a12)') '</DataArray>'
+    write(44,'(6x,a12)') '</PointData>'
+
+    ! Write footer
+    write(44, '(4x,a8)') '</Piece>'
+    write(44, '(2x,a12)') '</ImageData>'
+    write(44, '(a10)') '</VTKFile>'
+
+
+    close(44)
+    field_data(tag)%iteration = field_data(tag)%iteration + 1
+
+end subroutine parallel_scalar
+
+
+!> Write the parallel header file which describes the data distribution (but contains no data) for parallel vtk output.
+!!    @param[in]        tag     = tag associated to the field to save (ie matching index in the table "field_data")
+subroutine parallel_file_description(tag)
+
+    integer, intent(in)         :: tag
+
+    character (len=long_name)   :: file_name
+    integer                     :: rank
+
+    ! ===== Write data ===== 
+
+    ! Open file
+    file_name = trim(dir)//trim(output_name(tag))
+    open(unit = 45, file=file_name, status='new', form='formatted')
+
+    ! Write header
+    write(45, '(a21)') '<?xml version="1.0"?>'
+    write(45, '(a)') '<VTKFile type="PImageData" version="0.1" byte_order="LittleEndian">'
+    write(45,'(2x,a,i1,x,i0,x,i1,x,i0,x,i1,x,i0,a)') '<PImageData WholeExtent="', &
+            & 1, field_data(tag)%mesh%N(1), 1, field_data(tag)%mesh%N(2), 1, field_data(tag)%mesh%N(3), '"'
+    write(45, '(4x,a)') 'GhostLevel="0"'
+    write(45, '(4x,a)') 'Origin="0 0 0"'
+    write(45, '(4x,a,f7.5,x,f7.5,x,f7.5,a)') 'Spacing="', field_data(tag)%mesh%d_space(1), &
+        & field_data(tag)%mesh%d_space(2), field_data(tag)%mesh%d_space(3), '">'
+    ! Write information about data field
+    write(45,'(6x,a,a,a)') '<PPointData Scalars="', trim(field_data(tag)%f_name),'">'
+    write(45,'(8x,a,a,a)') '<PDataArray type="Float64" Name="', trim(field_data(tag)%f_name),'" NumberOfComponents="1" >'
+    write(45,'(8x,a13)') '</PDataArray>'
+    write(45,'(6x,a13)') '</PPointData>'
+
+    ! Piece description
+    do rank = 0, total_nb_proc-1
+        write(45, '(4x,a,i0,x,i0,x,i0,x,i0,x,i0,x,i0,a)') '<Piece Extent="', &
+            & field_data(tag)%piece_extend(rank,1,1), &
+            & field_data(tag)%piece_extend(rank,1,2), &
+            & field_data(tag)%piece_extend(rank,2,1), &
+            & field_data(tag)%piece_extend(rank,2,2), &
+            & field_data(tag)%piece_extend(rank,3,1), &
+            & field_data(tag)%piece_extend(rank,3,2),'"'
+        write(45, '(6x,a8,a,a)') 'Source="',trim(output_name(tag,rank)),'"/>'
+    end do
+
+    ! Write footer
+    write(45, '(2x,a13)') '</PImageData>'
+    write(45, '(a10)') '</VTKFile>'
+
+    close(45)
+
+end subroutine parallel_file_description
+
+
+!> Search the tag associated to a given field name
+!!    @param[in]    f_name  = name of the field (as it appears in the output)
+!!    @return       tag     = tag associated to the field to save (ie matching index in the table "field_data")
+function search_io_tag(f_name) result(tag)
+
+    character (len=short_name), intent(in)  ::  f_name
+    integer                                 ::  tag
+
+    tag = 1
+    do while (field_data(tag)%f_name /= f_name)
+        tag = tag + 1
+        if (tag > field_number) then
+            print *, 'wrong output name = ', f_name
+            stop 'it is useless to continue the computation without saving results'
+        end if
+    end do
+
+end function search_io_tag
+
+
+!> Create the output name by concatenating the field name, the rank (optional) and the index of the current output.
+!!    @param[in]    tag     = tag associated to the field to save (ie matching index in the table "field_data")
+!!    @param[in]    rank    = rank of the current process
+!!    @return       io_name = name of the output
+function output_name(tag, rank) result(io_name)
+    
+    integer, intent(in)                 :: tag
+    integer, intent(in), optional       :: rank
+    character (len=long_name)           :: io_name
+
+    write(io_name,format_no_rank) trim(field_data(tag)%f_name), &
+        & '-',field_data(tag)%iteration,'.pvti'
+    if (present(rank)) then
+        write(io_name,format_rank) trim(field_data(tag)%f_name), &
+            &'_p',rank,'-',field_data(tag)%iteration,'.vti'
+    end if
+
+end function output_name
+
+end module parallel_io
+!! @}
diff --git a/HySoP/src/scalesInterface/output/parallel_io_bin.f90 b/HySoP/src/scalesInterface/output/parallel_io_bin.f90
new file mode 100644
index 000000000..1d772dc9a
--- /dev/null
+++ b/HySoP/src/scalesInterface/output/parallel_io_bin.f90
@@ -0,0 +1,468 @@
+!------------------------------------------------------------------------------
+!
+! MODULE: parallel_io_bin
+!
+!
+! DESCRIPTION: 
+!> This module provides all the procedures needed to perform parallel,
+!! distributed output in the VTK format. All data are written as binary
+!! output rather than ascii output.
+!
+!! @details
+!!        This module provides tools to write distributed output, meaning that
+!!    the output of one field is written to one file per MPI process. This makes
+!!    it possible to write fields computed at high resolution without being
+!!    limited by the size of a single output file, and it avoids long loading
+!!    times during visualisation or when a file is read back to initialize a
+!!    computation from a given setup.
+!
+!!         This first version provides output tools only; input procedures may
+!!    be added in future work. The general context (number of fields to save,
+!!    physical dimensions, number of processes, ...) is initialized by calling
+!!    "parallel_io_init_all". The context specific to each field (mesh
+!!    resolution, number of points, name of the output, information about the
+!!    time sequence, ...) is initialized or saved by calling
+!!    "parallel_io_init_field". After that, call "parallel_write" to create a
+!!    new output of the field.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+!> @addtogroup output
+!! @{
+module parallel_io_bin
+
+    use precision
+    use cart_mesh
+
+    implicit none
+
+
+    interface parallel_write
+        !module procedure parallel_vect3D, parallel_scalar
+        module procedure parallel_scalar
+    end interface parallel_write
+
+
+    ! ===== Parameters about name size =====
+    !> Size of the character string for the name of each field (the full output name will be longer)
+    integer, private, parameter :: short_name = 12
+    !> Number of characters used to write the index of an output
+    integer, private, parameter :: size_ite = 3
+    !> Number of characters used to write the rank of the process
+    integer, private            :: size_rank = 3
+    !> Size of the character string corresponding to the final output name
+    integer, private            :: long_name = short_name + size_ite + 8
+
+    ! ===== Type =====
+    !> Information about the data to save
+    type io_data
+        !> field name
+        character (len=short_name)              :: f_name
+        !> index of the current output - used to build the time sequence
+        integer                                 :: iteration
+        !> mesh information
+        type(cartesian_mesh)                    :: mesh
+        !> piece extent (for the distributed VTK XML format)
+        integer, dimension(:,:,:), allocatable  :: piece_extend
+    end type io_data
+
+    ! ===== Public procedures =====
+    !> Generic procedure to write a field into distributed VTK files
+    public                  :: parallel_write
+    ! Initialisation of the mesh context
+    public                  :: parallel_io_init_all
+    public                  :: parallel_io_init_field
+    ! Destroy the io context and free memory
+    public                  :: parallel_io_finish
+
+
+    ! ===== Private procedures =====
+    ! Specific procedures to write a field into distributed VTK files
+    !private                 :: parallel_vect3D
+    private                 :: parallel_scalar
+    ! To search the right tag if the input one does not match the field name.
+    private                 :: search_io_tag
+
+
+    ! ===== Private variables =====
+    !> number of processes in each direction
+    integer, dimension(3), private                      :: nb_proc
+    !> total number of processes
+    integer, private                                    :: total_nb_proc
+    !> spatial length of the domain
+    real(WP), dimension(3), private                     :: domain_length
+    !> coordinates of the current process
+    integer, dimension(3), private                      :: coord3D
+    !> number of different fields to save
+    integer, private                                    :: field_number
+    !> number of fields for which io_data are already initialized
+    integer, private                                    :: field_initialized
+    !> io_data for each field to save
+    type(io_data), dimension(:), allocatable, private   :: field_data
+    !> rank of the current process (in the 3D cartesian communicator)
+    integer, private                                    :: rank3D
+    !> write format for the name of the *.pvti output (data-free file listing the distributed pieces)
+    character (len=100), private                        :: format_no_rank
+    !> write format for the name of the *.vti output (distributed file containing the data)
+    character (len=100), private                        :: format_rank
+    !> write format for the data (ie array output)
+    character (len=100), private                        :: format_data
+    !> directory where data are saved
+    character (len=50), private                         :: dir = './save/'
+    !> smallest value that can be written
+    real(WP), parameter, private                        :: minValue = 1e-9
+    !> Format of output (ascii or binary)
+    logical, private                                    :: binary = .true.
+
+    contains
+
+!> Initialize the general io context
+!!    @param[in]    nb_field    = number of different fields to save
+!!    @param[in]    nb_proc_3D  = number of processes along each direction
+!!    @param[in]    length_3D   = size of the (physical or possibly spectral) domain
+!!    @param[in]    rank_3D     = rank of the current process in the 3D mpi-topology
+!!    @param[in]    coord_3D    = coordinates of the current process in the 3D mpi-topology
+!!    @param[in]    dir_name    = (optional) name of the directory where the output has to be written
+subroutine parallel_io_init_all(nb_field, nb_proc_3D, length_3D, rank_3D, coord_3D, dir_name)
+
+    implicit none
+
+    integer, intent(in)                     :: nb_field, rank_3D
+    integer, dimension(3), intent(in)       :: nb_proc_3D, coord_3D
+    real(WP), dimension(3), intent(in)      :: length_3D
+    character (len=*), intent(in), optional :: dir_name
+
+    
+    ! Update save directory
+    if (present(dir_name)) dir = trim(dir_name)
+    
+    ! copy data
+    field_number    = nb_field
+    nb_proc         = nb_proc_3D
+    domain_length   = length_3D
+    rank3D          = rank_3D
+    coord3D         = coord_3D
+
+    ! Allocate field_data
+    allocate(field_data(field_number))
+    field_initialized = 0
+    
+    ! Compute "size_rank"
+    total_nb_proc = nb_proc(1)*nb_proc(2)*nb_proc(3)
+    size_rank = 1
+    do while (total_nb_proc/(10**size_rank)>1)
+        size_rank = size_rank + 1
+    end do
+    long_name = long_name + size_rank + len(trim(dir))
+
+    ! Init format for output file name
+    write(format_no_rank,'(a,i0,a1,i0,a)') '(a,a1,i',size_ite,'.',size_ite,',a)'
+    write(format_rank,'(a,i0,a1,i0,a,i0,a1,i0,a)') '(a,a2,i',size_rank,'.',size_rank, &
+            & ',a1,i',size_ite,'.',size_ite,',a)'
+
+end subroutine parallel_io_init_all
+
+
+!> Initialize the context associated to a given field
+!!    @param[in]    f_name      = name of the field (as it will appear in the output)
+!!    @param[out]   tag         = tag used to identify the field among the others when calling "parallel_write"
+!!    @param[in]    mesh        = mesh data associated to the current field (optional, if not present the default one is used)
+subroutine parallel_io_init_field(f_name, tag, mesh)
+
+    use mpi
+    use cart_topology
+
+    implicit none
+
+    character (len=*), intent(in)               :: f_name
+    type(cartesian_mesh), intent(in), optional  :: mesh
+    integer, intent(out)                        :: tag
+
+    integer                                     :: rank         ! mpi process rank
+    integer, dimension(3)                       :: coord3D      ! coordinates of the process in the 3D mpi topology
+    integer                                     :: ierr         ! mpi error code
+    integer                                     :: direction
+
+    if (field_initialized<field_number) then 
+        field_initialized = field_initialized + 1
+    else
+        stop 'more fields to save than announced when the io context was built (vtk parallel output)'
+    end if
+
+    tag = field_initialized
+
+    ! Save io_data for the field
+    field_data(tag)%f_name = f_name
+    field_data(tag)%iteration  = 0
+    if(present(mesh)) then
+        field_data(tag)%mesh = mesh
+    else
+        call mesh_save_default(field_data(tag)%mesh)
+    end if
+
+    ! Compute the piece extent of each process
+    allocate(field_data(tag)%piece_extend(0:total_nb_proc-1,3,2))
+    do rank = 0, total_nb_proc-1
+        call mpi_cart_coords(cart_comm, rank, 3, coord3D, ierr)
+        do direction=1, 3
+            field_data(tag)%piece_extend(rank,direction,1)=coord3D(direction)*field_data(tag)%mesh%N_proc(direction)+1
+            if (coord3D(direction)<nb_proc(direction)-1) then   ! coordinates of this rank, not of the calling process
+                field_data(tag)%piece_extend(rank,direction,2)=(coord3D(direction)+1)*field_data(tag)%mesh%N_proc(direction)+1
+            else
+                field_data(tag)%piece_extend(rank,direction,2)=(coord3D(direction)+1)*field_data(tag)%mesh%N_proc(direction)
+            end if
+        end do
+    end do
+
+end subroutine parallel_io_init_field
+
+
+!> Destroy the io context and free memory.
+subroutine parallel_io_finish()
+
+    integer     :: tag      ! field identifier
+
+    ! Free memory
+    do tag = 1, field_initialized
+        deallocate(field_data(tag)%piece_extend)
+    end do
+    deallocate(field_data)
+
+end subroutine parallel_io_finish
+
+
+! ===========================================================
+! ==================== Private procedures ===================
+! ===========================================================
+
+!> Write an output for a one-component field (eg a scalar)
+!!    @param[in,out]    tag     = tag associated to the field to save (ie matching index in the table "field_data")
+!!    @param[in]        values  = field (ie real values) to write
+!!    @param[in]        f_name  = name of the field (optional, redundant with tag, can be used to check it)
+subroutine parallel_scalar(tag, values, f_name)
+
+    integer, intent(inout)                      :: tag
+    character (len=*), intent(in), optional     :: f_name
+    double precision, dimension(:,:,:), intent(in)      :: values
+    
+    character (len=long_name+size_rank)         :: file_name    ! output name
+    integer                                     :: k, j, i      ! some array indices
+    character (len=short_name)                  :: f_name_bis
+    character (len=2000)                 :: buffer_char  ! used to write ascii text (the vtk header) into a binary stream file
+    
+    ! Check tag for output
+    if (present(f_name)) then
+        f_name_bis = trim(f_name)
+        if (field_data(tag)%f_name /= f_name_bis) tag = search_io_tag(f_name_bis)
+    end if
+
+    ! XXX TODO OR NOT ? Check mesh data ? XXX
+
+    ! ===== Write data ===== 
+
+    ! Write the parallel header file which describes the data distribution (but contains no data)
+    if (rank3D==0) call parallel_file_description(tag)
+
+    ! Write local file
+    ! Open file
+    file_name = trim(dir)//trim(output_name(tag, rank3D))
+    open(unit = 44, file=file_name, form='unformatted', access='stream')
+!    if (binary) then
+        !open(unit = 44, file=file_name, status='new', access='stream')
+!        open(unit = 44, file=file_name, status='new', form='formatted')
+!    else
+!        open(unit = 44, file=file_name, status='new', form='formatted')
+!    end if
+    ! XXX TODO : the file is opened in binary mode, but when characters are
+    ! written they still come out as plain ascii !!!
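+    ! Each XML line is first formatted as text in buffer_char by an internal
+    ! write, then trim(buffer_char) followed by a line feed (char(10)) is
+    ! written to the unformatted stream: the header stays readable ascii while
+    ! the data section can hold raw binary records.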
+    ! Write header
+    write(buffer_char, '(a21)') '<?xml version="1.0"?>'
+    write(44) trim(buffer_char)
+    write(44) char(10)
+    write(buffer_char, '(a)') '<VTKFile type="ImageData" version="0.1" byte_order="LittleEndian">'
+    write(44) trim(buffer_char)
+    write(44) char(10)
+    write(buffer_char,'(2x,a,i0,x,i0,x,i0,x,i0,x,i0,x,i0,a)') '<ImageData WholeExtent="', &
+        & field_data(tag)%piece_extend(rank3D,1,1), &
+        & field_data(tag)%piece_extend(rank3D,1,2), &
+        & field_data(tag)%piece_extend(rank3D,2,1), &
+        & field_data(tag)%piece_extend(rank3D,2,2), &
+        & field_data(tag)%piece_extend(rank3D,3,1), &
+        & field_data(tag)%piece_extend(rank3D,3,2),'"'
+    write(44) trim(buffer_char)
+    write(44) char(10)
+    write(buffer_char, '(4x,a)') 'Origin="0 0 0"'
+    write(44) trim(buffer_char)
+    write(44) char(10)
+    write(buffer_char, '(4x,a,f6.4,x,f6.4,x,f6.4,a)') 'Spacing="', field_data(tag)%mesh%d_space(1), &
+        & field_data(tag)%mesh%d_space(2), field_data(tag)%mesh%d_space(3), '">'
+    write(44) trim(buffer_char)
+    write(44) char(10)
+    write(buffer_char, '(4x,a,i0,x,i0,x,i0,x,i0,x,i0,x,i0,a)') '<Piece Extent="', &
+        & field_data(tag)%mesh%absolute_extend(1,1), &
+        & field_data(tag)%mesh%absolute_extend(1,2), &
+        & field_data(tag)%mesh%absolute_extend(2,1), &
+        & field_data(tag)%mesh%absolute_extend(2,2), &
+        & field_data(tag)%mesh%absolute_extend(3,1), &
+        & field_data(tag)%mesh%absolute_extend(3,2),'">'
+    write(44) trim(buffer_char)
+    write(44) char(10)
+
+    ! Write data
+    ! Information about field
+    write(buffer_char,'(6x,a,a,a)') '<PointData Scalars="', trim(field_data(tag)%f_name),'">'
+    write(44) trim(buffer_char)
+    write(44) char(10)
+!    if (binary) then
+        write(buffer_char,'(8x,a,a,a)') '<DataArray type="Float64" Name="', &
+                & trim(field_data(tag)%f_name),'" NumberOfComponents="1" format="binary">'
+        write(44) trim(buffer_char)
+        write(44) char(10)
+
+        do k = 1, field_data(tag)%mesh%N_proc(3)
+            do j = 1, field_data(tag)%mesh%N_proc(2)
+                do i = 1, field_data(tag)%mesh%N_proc(1)
+                    write(44) values(i,j,k)    ! one raw Float64 record per grid point
+                end do
+            end do
+        end do
+        write(44) char(10)
+!    else
+!        write(buffer_char,'(8x,a,a,a)') '<DataArray type="Float64" Name="', &
+!                & trim(field_data(tag)%f_name),'" NumberOfComponents="1" format="ascii">'
+!        write(44) trim(buffer_char)
+!        write(44) char(10)
+!        ! Data in ascii format
+!        write(format_data,'(a,i0,a)') '(',field_data(tag)%mesh%N_proc(1), '(f15.5,x))'
+!
+!        do k = 1, field_data(tag)%mesh%N_proc(3)
+!            do j = 1, field_data(tag)%mesh%N_proc(2)
+!                write(buffer_char, format_data) values(:,j,k)
+!                write(*) buffer_char
+!                write(44) char(10)
+!            end do
+!        end do
+!    end if
+
+    ! Close environment
+    write(buffer_char,'(8x,a12)') '</DataArray>'
+    write(44) trim(buffer_char)
+    write(44) char(10)
+    write(buffer_char,'(6x,a12)') '</PointData>'
+    write(44) trim(buffer_char)
+    write(44) char(10)
+
+    ! Write footer
+    write(buffer_char, '(4x,a8)') '</Piece>'
+    write(44) trim(buffer_char)
+    write(44) char(10)
+    write(buffer_char, '(2x,a12)') '</ImageData>'
+    write(44) trim(buffer_char)
+    write(44) char(10)
+    write(buffer_char, '(a10)') '</VTKFile>'
+    write(44) trim(buffer_char)
+    write(44) char(10)
+
+
+    close(44)
+    field_data(tag)%iteration = field_data(tag)%iteration + 1
+
+end subroutine parallel_scalar
+
+
+!> Write the parallel header file which describes the data distribution (but contains no data) for parallel vtk output.
+!!    @param[in]        tag     = tag associated to the field to save (ie matching index in the table "field_data")
+subroutine parallel_file_description(tag)
+
+    integer, intent(in)         :: tag
+
+    character (len=long_name)   :: file_name
+    integer                     :: rank
+
+    ! ===== Write data ===== 
+
+    ! Open file
+    file_name = trim(dir)//trim(output_name(tag))
+    open(unit = 45, file=file_name, status='new', form='formatted')
+
+    ! Write header
+    write(45, '(a21)') '<?xml version="1.0"?>'
+    write(45, '(a)') '<VTKFile type="PImageData" version="0.1" byte_order="LittleEndian">'
+    write(45,'(2x,a,i1,x,i0,x,i1,x,i0,x,i1,x,i0,a)') '<PImageData WholeExtent="', &
+            & 1, field_data(tag)%mesh%N(1), 1, field_data(tag)%mesh%N(2), 1, field_data(tag)%mesh%N(3), '"'
+    write(45, '(4x,a)') 'GhostLevel="0"'
+    write(45, '(4x,a)') 'Origin="0 0 0"'
+    write(45, '(4x,a,f7.5,x,f7.5,x,f7.5,a)') 'Spacing="', field_data(tag)%mesh%d_space(1), &
+        & field_data(tag)%mesh%d_space(2), field_data(tag)%mesh%d_space(3), '">'
+    ! Write information about data field
+    write(45,'(6x,a,a,a)') '<PPointData Scalars="', trim(field_data(tag)%f_name),'">'
+    write(45,'(8x,a,a,a)') '<PDataArray type="Float64" Name="', trim(field_data(tag)%f_name),'" NumberOfComponents="1" >'
+    write(45,'(8x,a13)') '</PDataArray>'
+    write(45,'(6x,a13)') '</PPointData>'
+
+    ! Piece description
+    do rank = 0, total_nb_proc-1
+        write(45, '(4x,a,i0,x,i0,x,i0,x,i0,x,i0,x,i0,a)') '<Piece Extent="', &
+            & field_data(tag)%piece_extend(rank,1,1), &
+            & field_data(tag)%piece_extend(rank,1,2), &
+            & field_data(tag)%piece_extend(rank,2,1), &
+            & field_data(tag)%piece_extend(rank,2,2), &
+            & field_data(tag)%piece_extend(rank,3,1), &
+            & field_data(tag)%piece_extend(rank,3,2),'"'
+        write(45, '(6x,a8,a,a)') 'Source="',trim(output_name(tag,rank)),'"/>'
+    end do
+
+    ! Write footer
+    write(45, '(2x,a13)') '</PImageData>'
+    write(45, '(a10)') '</VTKFile>'
+
+    close(45)
+
+end subroutine parallel_file_description
+
+
+!> Search the tag associated to a given field name
+!!    @param[in]    f_name  = name of the field (as it appears in the output)
+!!    @return       tag     = tag associated to the field to save (ie matching index in the table "field_data")
+function search_io_tag(f_name) result(tag)
+
+    character (len=short_name), intent(in)  ::  f_name
+    integer                                 ::  tag
+
+    tag = 1
+    do while (field_data(tag)%f_name /= f_name)
+        tag = tag + 1
+        if (tag > field_number) then
+            print *, 'wrong output name = ', f_name
+            stop 'it is useless to continue the computation without saving results'
+        end if
+    end do
+
+end function search_io_tag
+
+
+!> Create the output name by concatenating the field name, the rank (optional) and the index of the current output.
+!!    @param[in]    tag     = tag associated to the field to save (ie matching index in the table "field_data")
+!!    @param[in]    rank    = rank of the current process
+!!    @return       io_name = name of the output
+function output_name(tag, rank) result(io_name)
+    
+    integer, intent(in)                 :: tag
+    integer, intent(in), optional       :: rank
+    character (len=long_name)           :: io_name
+
+    write(io_name,format_no_rank) trim(field_data(tag)%f_name), &
+        & '-',field_data(tag)%iteration,'.pvti'
+    if (present(rank)) then
+        write(io_name,format_rank) trim(field_data(tag)%f_name), &
+            &'_p',rank,'-',field_data(tag)%iteration,'.vti'
+    end if
+
+end function output_name
+
+end module parallel_io_bin
+!> @}
diff --git a/HySoP/src/scalesInterface/particles/advec.f90 b/HySoP/src/scalesInterface/particles/advec.f90
new file mode 100644
index 000000000..6fa7e0341
--- /dev/null
+++ b/HySoP/src/scalesInterface/particles/advec.f90
@@ -0,0 +1,151 @@
+!> @addtogroup part
+!! @{
+!------------------------------------------------------------------------------
+!
+! MODULE: advec
+!
+!
+! DESCRIPTION: 
+!> The module advec provides all the public interfaces needed to solve an
+!! advection equation with a particle method.
+!
+!> @details
+!!     This module contains the generic procedures used to initialize and
+!! parametrise the advection solver based on a particle method. It also contains
+!! the subroutine "advec_step", which solves the equation for a given time step.
+!! It is the only module that has to be used by a code relying on this library
+!! of particle methods.
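+!
+!> @par Typical call sequence
+!! (illustrative sketch: dt and the velocity/scalar arrays are placeholders
+!! provided by the calling code)
+!! @code
+!!    real(WP) :: dt, stab_coeff
+!!    call advec_init('p_O2', stab_coeff)
+!!    ! ... choose dt such that dt < stab_coeff/norm_inf(V) ...
+!!    call advec_step(dt, Vx, Vy, Vz, scal)
+!! @endcode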
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+module advec
+
+  use string
+  use advecX
+  use advecY
+  use advecZ
+
+  implicit none
+
+  ! ===== Variables =====
+  !> numerical method used to advect the scalar
+  character(len=str_short), private   :: type_part_solv
+  !> dimensional splitting (eg classical, Strang or particle splitting)
+  character(len=str_short), private   :: dim_splitting
+
+
+  ! ===== Public procedures =====
+  ! Scheme used to advect the scalar (order 2 or 4 ?)
+  public                              :: type_part_solver
+
+  ! Advection methods
+  public                              :: advec_init   ! initialize the scalar solver
+  public                              :: advec_step   ! advect the scalar field over one time step
+
+contains
+
+  ! ===== Public methods =====
+
+  !> Return the name of the particle method used for the advection
+  !!    @return type_part_solver      = numerical method used for advection
+  function type_part_solver()
+    character(len=str_short)    :: type_part_solver
+
+    type_part_solver = type_part_solv
+  end function type_part_solver
+
+  !> Initialise the particle advection methods
+  !!    @param[in]    order       = chooses the remeshing method (and thus the order)
+  !!    @param[out]   stab_coeff  = stability coefficient (the stability condition is
+  !!                                  dt < stab_coeff/norm_inf(V))
+  !!    @param[in]    verbosity   = to display info about the chosen remeshing formula (optional)
+  subroutine advec_init(order, stab_coeff, verbosity)
+
+    ! Input/Output
+    character(len=*), optional, intent(in)  ::  order
+    logical, optional, intent(in)           ::  verbosity
+    real(WP), optional, intent(out)         ::  stab_coeff
+
+    ! Use default solver if it is not chosen by the user.
+    if(present(order)) then
+       type_part_solv = order
+    else 
+       type_part_solv = 'p_O2'
+    end if
+
+    ! Initialize the solver
+    if (present(verbosity)) then
+       call AC_solver_init(type_part_solv, verbosity)
+    else
+       call AC_solver_init(type_part_solv)
+    end if
+
+    ! Choose the dimensional splitting to use
+    ! XXX TODO : parse the input file
+    dim_splitting = 'strang'
+
+    ! Compute stability coefficient
+    if (present(stab_coeff)) stab_coeff = 1.0/(2.0*dble(bl_size))
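+    ! Illustrative example: if bl_size = 4 (its actual value is set elsewhere in
+    ! the solver), stab_coeff = 1/(2*4) = 0.125 and the caller must keep
+    ! dt < 0.125/norm_inf(V).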
+
+  end subroutine advec_init
+
+
+  !> Solve the advection equation over one time step (generic advection scheme)
+  !!    @param[in]        dt          = time step
+  !!    @param[in]        Vx          = velocity along x (may be discretised on a larger mesh than the scalar)
+  !!    @param[in]        Vy          = velocity along y
+  !!    @param[in]        Vz          = velocity along z
+  !!    @param[in,out]    scal        = scalar field to advect
+  !!    @param[in]        dim_split   = dimensional splitting (eg classical,
+  !!                                        Strang splitting or particle splitting)
+  subroutine advec_step(dt, Vx, Vy, Vz, scal, dim_split)
+
+    ! Input/Output
+    real(WP), intent(in)                                                :: dt
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vx, Vy, Vz
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal
+    character(len=*), optional, intent(in)                      :: dim_split
+    ! Other local variables 
+    character(len=str_short)        :: splitting    ! to choose the splitting
+
+    ! Default dimensional splitting if the user does not choose one
+    if(present(dim_split)) then
+       splitting = dim_split
+    else 
+       splitting = dim_splitting
+    end if
+
+    ! Scheme used for advection : particle method (order 2 or 4) or a spectral one.
+    if (type_solv=='spectral') then
+       print*, 'Spectral solver not implemented'
+    end if
+
+    select case(splitting)
+    case('classic')
+       call advecX_calc(dt, Vx, scal, type_part_solv)
+       call advecY_calc(dt, Vy, scal, type_part_solv)
+       call advecZ_calc(dt, Vz, scal, type_part_solv)
+    case('Strang')
+       call advecX_calc(dt/2.0, Vx, scal, type_part_solv)
+       call advecY_calc(dt/2.0, Vy, scal, type_part_solv)
+       call advecZ_calc(dt/2.0, Vz, scal, type_part_solv)
+       call advecZ_calc(dt/2.0, Vz, scal, type_part_solv)
+       call advecY_calc(dt/2.0, Vy, scal, type_part_solv)
+       call advecX_calc(dt/2.0, Vx, scal, type_part_solv)
+    case default
+       call advecX_calc(dt/2.0, Vx, scal, type_part_solv)
+       call advecY_calc(dt/2.0, Vy, scal, type_part_solv)
+       call advecZ_calc(dt/2.0, Vz, scal, type_part_solv)
+       call advecZ_calc(dt/2.0, Vz, scal, type_part_solv)
+       call advecY_calc(dt/2.0, Vy, scal, type_part_solv)
+       call advecX_calc(dt/2.0, Vx, scal, type_part_solv)
+    end select
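+    ! Note: 'classic' sweeps each direction once with the full time step, while
+    ! 'Strang' (also the default) uses the symmetric X,Y,Z,Z,Y,X sequence with
+    ! dt/2 per sweep, i.e. the classical second-order Strang splitting.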
+
+  end subroutine advec_step
+
+
+  !> ===== Private procedure =====
+end module advec
+!> @}
diff --git a/HySoP/src/scalesInterface/particles/advecX.f90 b/HySoP/src/scalesInterface/particles/advecX.f90
new file mode 100644
index 000000000..9636852d9
--- /dev/null
+++ b/HySoP/src/scalesInterface/particles/advecX.f90
@@ -0,0 +1,757 @@
+!> @addtogroup part
+
+!------------------------------------------------------------------------------
+!
+! MODULE: advecX
+!
+!
+! DESCRIPTION: 
+!> The module advecX is devoted to the advection of a scalar field along the X axis.
+!! It uses a particle method and provides a parallel implementation.
+!
+!> @details
+!! This module is a part of the advection solver based on a particle method.
+!! The solver uses a dimensional splitting and this module contains all the
+!! methods used to solve the advection along the X-axis. This is a parallel
+!! implementation using MPI and the cartesian topology it provides.
+!!
+!! This module can use the methods and variables defined in the module
+!! "advec_common", which gathers information and tools shared by the advection
+!! along the x, y and z-axis.
+!!
+!! The module "test_advec" can be used in order to validate the procedures
+!! embedded in this module.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advecX
+
+  use precision
+  use advec_remeshing_formula
+
+  implicit none
+
+  ! ===== Public procedures =====
+  !> Generic procedure to advect the scalar with a particle solver
+  public                  :: advecX_calc
+  !----- (corrected) Remeshing methods (these methods are set to public for validation purposes) -----
+  public                  :: Xremesh_O2       ! order 2
+  public                  :: Xremesh_O4       ! order 4
+  ! ----- Other remeshing formulae -----
+  public                  :: Xremesh_Mprime6
+
+  ! ===== Private procedures =====
+  !> particle solvers with different remeshing methods
+  private                 :: advecX_calc_O2       ! remeshing method at order 2
+  private                 :: advecX_calc_O2_group ! remeshing method at order 2, group of lines version
+  private                 :: advecX_calc_Mprime6  ! M'6 remeshing method
+  ! Particles initialisation
+  private                 :: advecX_init      ! generic initialization of particle position and velocity
+  private                 :: advecX_init_line ! initialisation for only one line of particles
+  private                 :: advecX_init_group! initialisation for a group of lines of particles
+
+  ! ===== Private variables =====
+  !> size of the groups of lines along X
+  integer, dimension(2), private  :: gpX_size
+  !> Current direction = along X
+  integer, private, parameter     :: direction=1
+
+  interface advecX_init
+     module procedure advecX_init_line, advecX_init_group
+  end interface advecX_init
+
+contains
+
+  ! #####################################################################################
+  ! #####                                                                           #####
+  ! #####                         Public procedure                                  #####
+  ! #####                                                                           #####
+  ! #####################################################################################
+
+  ! ====================================================================
+  ! ====================    Generic solveur         ====================
+  ! ====================================================================
+
+  !> Scalar advection (this procedure calls the right solver, depending on the simulation setup)
+  !!    @param[in]        dt          = time step
+  !!    @param[in]        Vx          = velocity along x (may be discretised on a larger mesh than the scalar)
+  !!    @param[in,out]    SC          = scalar field to advect
+  !!    @param[in]        type_solver = scheme used for the advection (particle method of order 2 or 4)
+  subroutine advecX_calc(dt, Vx, SC, type_solver)
+
+    ! Input/Output
+    real(WP), intent(in)                                                :: dt
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vx
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: SC
+    character(len=*), intent(in)                                        :: type_solver
+
+    ! Allocate send_ind_min/max
+    if(allocated(send_group_min)) deallocate(send_group_min)
+    allocate(send_group_min(group_size(direction,1),group_size(direction,2)))
+    if(allocated(send_group_max)) deallocate(send_group_max)
+    allocate(send_group_max(group_size(direction,1),group_size(direction,2)))
+
+    ! Call the right solver depending on the space order we want to use.
+    select case(type_solver)
+    case('p_O2')
+       call advecX_calc_O2_group(dt, Vx, SC, group_size(direction,:))
+       !call advecX_calc_O2(dt, Vx, SC)
+    case('p_O4')
+       call advecX_calc_O4(dt, Vx, SC, group_size(direction,:))
+    case('p_M6')
+       call advecX_calc_Mprime6(dt, Vx, SC, group_size(direction,:))
+    case default
+       call advecX_calc_O2_group(dt, Vx, SC, group_size(direction,:))
+    end select
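+    ! 'p_O2' and the default case use the corrected order 2 (lambda 2) remeshing,
+    ! 'p_O4' the order 4 remeshing and 'p_M6' the M'6 remeshing formula.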
+
+
+  end subroutine advecX_calc
+
+
+  ! #####################################################################################
+  ! #####                                                                           #####
+  ! #####                         Private procedure                                 #####
+  ! #####                                                                           #####
+  ! #####################################################################################
+
+  ! ====================================================================
+  ! ====================    Different solvers       ====================
+  ! ====================================================================
+
+  !> Advection during a time step dt - order 2
+  !!    @param[in]        dt      = time step
+  !!    @param[in]        Vx      = velocity along x (may be discretised on a larger mesh than the scalar)
+  !!    @param[in,out]    scal3D   = scalar field to advect
+  subroutine advecX_calc_O2(dt,Vx,scal3D)
+
+    ! Input/Output
+    real(WP), intent(in)                                                :: dt
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vx
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal3D
+    ! Other local variables
+    integer                             :: j,k          ! indices of the current mesh point
+    integer, dimension(2)               :: ind_group    ! index of the current group of lines (=(j,k) by default)
+    integer                             :: direction=1  ! current direction = along X
+    real(WP), dimension(N_proc(1))      :: p_pos_adim   ! dimensionless particle positions
+    real(WP), dimension(N_proc(1))      :: p_V          ! particle velocities
+    logical, dimension(bl_nb(1)+1)      :: bl_type      ! is the particle block a center block or a left one ?
+    logical, dimension(bl_nb(1))        :: bl_tag       ! flags the tagged particle blocks
+
+    ind_group = 0
+    do k = 1, N_proc(3)
+       ind_group(2) = ind_group(2) + 1
+       ind_group(1) = 0
+       do j = 1, N_proc(2)
+          ind_group(1) = ind_group(1) + 1
+
+          ! ===== Init particles =====
+          call advecX_init(Vx, j, k, p_pos_adim, p_V)
+
+          ! ===== Advection =====
+          ! -- Compute velocity (with a RK2 scheme) --
+          call AC_particle_velocity(dt, direction, ind_group, p_pos_adim, p_V)
+          ! -- Advec particles --
+          p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+
+          ! ===== Remeshing =====
+          ! -- Pre-Remeshing: Determine blocks type and tag particles --
+          call AC_type_and_block(dt, direction, ind_group, p_V, &
+               & bl_type, bl_tag)
+          ! -- Remeshing --
+          call Xremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,j,k,scal3D)
+
+       end do
+    end do
+
+  end subroutine advecX_calc_O2
+
+
+  !> Advection during a time step dt - order 2
+  !!    @param[in]        dt      = time step
+  !!    @param[in]        Vx      = velocity along x (may be discretised on a larger mesh than the scalar)
+  !!    @param[in,out]    scal3D  = scalar field to advect
+  !!    @param[in]        gs      = size of groups (along X direction)
+  subroutine advecX_calc_O2_group(dt,Vx,scal3D,gs)
+
+    ! Input/Output
+    real(WP), intent(in)                                                :: dt
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vx
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal3D
+    integer, dimension(2), intent(in)                                   :: gs   ! size of the group of lines (along X direction)
+    ! Other local variables
+    integer                                             :: j,k          ! indices of the current mesh point
+    integer, dimension(2)                               :: ind_group    ! index of the current group of lines (=(j,k) by default)
+    real(WP), dimension(N_proc(direction),gs(1),gs(2))  :: p_pos_adim   ! dimensionless particle positions
+    real(WP), dimension(N_proc(direction),gs(1),gs(2))  :: p_V          ! particle velocities
+    logical, dimension(bl_nb(direction)+1,gs(1),gs(2))  :: bl_type      ! is the particle block a center block or a left one ?
+    logical, dimension(bl_nb(direction),gs(1),gs(2))    :: bl_tag       ! flags the tagged particle blocks
+
+    ind_group = 0
+
+    do k = 1, N_proc(3), gs(2)
+       ind_group(2) = ind_group(2) + 1
+       ind_group(1) = 0
+       do j = 1, N_proc(2), gs(1)
+          ind_group(1) = ind_group(1) + 1
+
+          ! ===== Init particles =====
+          call advecX_init_group(Vx, j, k, gs, p_pos_adim, p_V)
+
+          ! ===== Advection =====
+          ! -- Compute velocity (with a RK2 scheme) --
+          call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
+
+          ! -- Advec particles --
+          p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+
+          ! ===== Remeshing =====
+          ! -- Pre-Remeshing: Determine blocks type and tag particles --
+          call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
+          ! -- Remeshing --
+          call Xremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag, j,k,scal3D)
+
+       end do
+    end do
+
+  end subroutine advecX_calc_O2_group
+
+
+  !> Advection during a time step dt - order 4
+  !!    @param[in]        dt      = time step
+  !!    @param[in]        Vx      = velocity along x (may be discretised on a larger mesh than the scalar)
+  !!    @param[in,out]    scal3D  = scalar field to advect
+  !!    @param[in]        gs      = size of groups (along X direction)
+  subroutine advecX_calc_O4(dt,Vx,scal3D,gs)
+
+    ! Input/Output
+    real(WP), intent(in)                                                :: dt
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vx
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal3D
+    integer, dimension(2), intent(in)                                   :: gs   ! size of the group of lines (along X direction)
+    ! Other local variables
+    integer                                             :: j,k          ! indices of the current mesh point
+    integer, dimension(2)                               :: ind_group    ! index of the current group of lines (=(j,k) by default)
+    real(WP), dimension(N_proc(direction),gs(1),gs(2))  :: p_pos_adim   ! dimensionless particle positions
+    real(WP), dimension(N_proc(direction),gs(1),gs(2))  :: p_V          ! particle velocities
+    logical, dimension(bl_nb(direction)+1,gs(1),gs(2))  :: bl_type      ! is the particle block a center block or a left one ?
+    logical, dimension(bl_nb(direction),gs(1),gs(2))    :: bl_tag       ! flags the tagged particle blocks
+
+    ind_group = 0
+
+    do k = 1, N_proc(3), gs(2)
+       ind_group(2) = ind_group(2) + 1
+       ind_group(1) = 0
+       do j = 1, N_proc(2), gs(1)
+          ind_group(1) = ind_group(1) + 1
+
+          ! ===== Init particles =====
+          call advecX_init_group(Vx, j, k, gs, p_pos_adim, p_V)
+
+          ! ===== Advection =====
+          ! -- Compute velocity (with a RK2 scheme) --
+          call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
+          ! -- Advec particles --
+          p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+
+          ! ===== Remeshing =====
+          ! -- Pre-Remeshing: Determine blocks type and tag particles --
+          call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
+          ! -- Remeshing --
+          call Xremesh_O4(ind_group, gs, p_pos_adim, bl_type, bl_tag, j,k,scal3D)
+
+       end do
+    end do
+
+
+  end subroutine advecX_calc_O4
+
+
+  !> Advection during a time step dt -  M'6 remeshing formula
+  !!    @param[in]        dt      = time step
+  !!    @param[in]        Vx      = velocity along x (may be discretised on a larger mesh than the scalar)
+  !!    @param[in,out]    scal3D  = scalar field to advect
+  !!    @param[in]        gs      = size of groups (along X direction)
+  subroutine advecX_calc_Mprime6(dt,Vx,scal3D,gs)
+
+    ! Input/Output
+    real(WP), intent(in)                                                :: dt
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vx
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal3D
+    integer, dimension(2), intent(in)                                   :: gs   ! size of the group of lines (along X direction)
+    ! Other local variables
+    integer                                             :: j,k          ! indices of the current mesh point
+    integer, dimension(2)                               :: ind_group    ! index of the current group of lines (=(j,k) by default)
+    real(WP), dimension(N_proc(direction),gs(1),gs(2))  :: p_pos_adim   ! dimensionless particle positions
+    real(WP), dimension(N_proc(direction),gs(1),gs(2))  :: p_V          ! particle velocities
+
+    ind_group = 0
+
+    do k = 1, N_proc(3), gs(2)
+       ind_group(2) = ind_group(2) + 1
+       ind_group(1) = 0
+       do j = 1, N_proc(2), gs(1)
+          ind_group(1) = ind_group(1) + 1
+
+          ! ===== Init particles =====
+          call advecX_init_group(Vx, j, k, gs, p_pos_adim, p_V)
+
+          ! ===== Advection =====
+          ! -- Compute velocity (with a RK2 scheme) --
+          call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
+          ! -- Advec particles --
+          p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+
+          ! ===== Remeshing =====
+          ! -- Remeshing --
+          call Xremesh_Mprime6(ind_group, gs, p_pos_adim, j,k,scal3D)
+
+       end do
+    end do
+
+
+  end subroutine advecX_calc_Mprime6
+
+
+  ! ====================================================================
+  ! ====================   Remeshing subroutines    ====================
+  ! ====================================================================
+
+  !> remeshing with an order 2 method, corrected to allow large CFL numbers - untagged particles
+  !!    @param[in]        ind_group   = coordinates of the current group of lines
+  !!    @param[in]        p_pos_adim  = dimensionless particle positions
+  !!    @param[in]        bl_type     = .false. (resp .true.) if the block is a left (resp centered) one
+  !!    @param[in]        bl_tag      = contains information about the blocks (are they tagged ?)
+  !!    @param[in]        j,k         = indices of the current line (y-coordinate and z-coordinate)
+  !!    @param[in,out]    scal        = scalar field to advect
+  subroutine Xremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,j,k,scal)
+
+    ! Input/Output
+    integer, dimension(2), intent(in)                                   :: ind_group
+    integer, intent(in)                                                 :: j, k
+    logical, dimension(:), intent(in)                                   :: bl_type
+    logical, dimension(:), intent(in)                                   :: bl_tag
+    real(WP), dimension(:), intent(in)                                  :: p_pos_adim
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal
+    ! Other local variables 
+    ! Variable used to remesh particles in a buffer
+    real(WP),dimension(:),allocatable   :: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain
+    integer, dimension(2)               :: rece_proc    ! minimal and maximal gap between my coordinate along the current direction
+    ! and the one from which I will receive data
+    integer                             :: proc_min     ! smallest gap between me and the processes to which I send data
+    integer                             :: proc_max     ! largest gap between me and the processes to which I send data
+
+
+    !  -- Compute ranges --
+    if (bl_type(1)) then
+       ! First particle is a centered one
+       send_j_min = nint(p_pos_adim(1))-1
+    else
+       ! First particle is a left one
+       send_j_min = floor(p_pos_adim(1))-1
+    end if
+    if (bl_type(N_proc(direction)/bl_size +1)) then
+       ! Last particle is a centered one
+       send_j_max = nint(p_pos_adim(N_proc(direction)))+1
+    else
+       ! Last particle is a left one
+       send_j_max = floor(p_pos_adim(N_proc(direction)))+1
+    end if
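+    ! Editor's note - illustrative example (the numbers below are assumed, not taken
+    ! from the code): if the first particle is a left one at adimensioned position 7.3,
+    ! floor(7.3)-1 gives send_j_min = 6; if the last particle is a centered one at
+    ! position 12.6, nint(12.6)+1 gives send_j_max = 14. The +/-1 margin covers the
+    ! support of the corrected lambda-2 remeshing kernel used below.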
+
+    ! -- Determine the communications needed: who will communicate with whom? (i.e. compute senders and receivers) --
+    call AC_obtain_senders_line(send_j_min, send_j_max, direction, ind_group, proc_min, proc_max, rece_proc)
+
+    ! -- Allocate buffer for remeshing of local particles --
+    allocate(send_buffer(send_j_min:send_j_max))
+    send_buffer = 0.0;
+
+    ! -- Remesh the particles in the buffer --
+    call AC_remesh_lambda2corrected(direction, p_pos_adim, scal(:,j,k), bl_type, bl_tag, send_j_min, send_j_max, send_buffer)
+
+    ! -- Send the buffer to the matching processus and update the scalar field --
+    scal(:,j,k) = 0
+    call AC_bufferToScalar(direction, ind_group , send_j_min, send_j_max, proc_min, proc_max, rece_proc, send_buffer, scal(:,j,k))
+
+    ! Deallocate all field
+    deallocate(send_buffer)
+
+  end subroutine Xremesh_O2
+
+  !> remeshing with an order 2 method, corrected to allow large CFL number - group version
+  !!    @param[in]        ind_group   = coordinate of the current group of lines
+  !!    @param[in]        gs          = size of groups (along X direction)
+  !!    @param[in]        p_pos_adim  = adimensioned particle positions
+  !!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
+  !!    @param[in]        bl_tag      = contains information about the block (is it tagged?)
+  !!    @param[in]        j,k         = indices of the current line (y-coordinate and z-coordinate)
+  !!    @param[in,out]    scal        = scalar field to advect
+  subroutine Xremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag,j,k,scal)
+
+    ! Input/Output
+    integer, dimension(2), intent(in)                                   :: ind_group
+    integer, dimension(2), intent(in)                                   :: gs
+    integer, intent(in)                                                 :: j, k
+    real(WP), dimension(N_proc(direction),gs(1),gs(2)), intent(in)      :: p_pos_adim   ! adimensionned particles position 
+    logical, dimension(bl_nb(direction)+1,gs(1),gs(2)), intent(in)      :: bl_type      ! is the particle block a center block or a left one ?
+    logical, dimension(bl_nb(direction),gs(1),gs(2)), intent(in)        :: bl_tag       ! indice of tagged particles
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal
+    ! Other local variables 
+    integer                                 :: proc_gap, gap! distance between my (MPI) coordinate and the coordinate of the
+    ! process associated with a given position
+    integer, dimension(gs(1),gs(2),2)       :: send_gap     ! distance between me and the processes to which I send information
+    integer, dimension(2)                   :: send_gap_abs ! min (resp. max) value of send_gap(:,:,i) with i=1 (resp. 2)
+    integer, dimension(:), allocatable      :: send_rank    ! rank of the processes to which I send information
+    integer, dimension(2 , 2)               :: rece_gap     ! distance between me and the processes from which I receive information
+    integer                                 :: ind          ! index
+    integer, dimension(:,:), allocatable    :: cartography  ! cartography(:,proc_gap) contains the set of line indices in the block to which the
+    ! current process will send data during remeshing and, for each of these lines, the range
+    ! of mesh points that receive remeshed values.
+    integer, dimension(:,:), allocatable    :: rece_carto   ! same as above but for what I receive
+    integer                                 :: min_size     ! minimal size of cartography(:,proc_gap)
+    integer                                 :: max_size     ! maximal size of cartography(:,proc_gap)
+    integer                                 :: ind_for_i1   ! where to read the first coordinate (i1) of the current line inside the cartography?
+    ! Variables used to remesh particles in a buffer
+    real(WP),dimension(:),allocatable,target:: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain,
+    ! sorted by receivers and not by coordinate.
+    integer, dimension(:), allocatable      :: pos_in_buffer! current position in the buffer for each target process
+    type(real_pter),dimension(:),allocatable:: remesh_pter  ! pointers into the send buffer, in which the scalar is sorted by line index.
+    ! sorted by receivers
+
+    ! I will receive data
+    !    integer, dimension(gs(1),gs(2))         :: proc_min     ! smaller gap between me and the processes to where I send data
+    !    integer, dimension(gs(1),gs(2))         :: proc_max     ! smaller gap between me and the processes to where I send data
+
+    integer                                 :: i1, i2       ! indice of a line into the group
+    ! Variables used to manage MPI communications
+    integer                                 :: com_size     ! size of message send/receive
+    integer, dimension(:), allocatable      :: s_request_range! mpi communication request (handle) of nonblocking send
+    integer                                 :: tag          ! mpi message tag
+    integer                                 :: ierr         ! mpi error code
+
+    !  -- Compute ranges --
+    where (bl_type(1,:,:))
+       ! First particle is a centered one
+       send_group_min = nint(p_pos_adim(1,:,:))-1
+    elsewhere
+       ! First particle is a left one
+       send_group_min = floor(p_pos_adim(1,:,:))-1
+    end where
+    where (bl_type(N_proc(direction)/bl_size +1,:,:))
+       ! Last particle is a centered one
+       send_group_max = nint(p_pos_adim(N_proc(direction),:,:))+1
+    elsewhere
+       ! Last particle is a left one
+       send_group_max = floor(p_pos_adim(N_proc(direction),:,:))+1
+    end where
+
+    ! ===== Determine the communications needed: who will communicate with whom? (i.e. compute senders and receivers) =====
+    !call AC_obtain_senders(direction, gs, ind_group, proc_min, proc_max, rece_proc)
+    ! -- What have I to communicate ? --
+    send_gap(:,:,1) = floor(real(send_group_min-1)/N_proc(direction))
+    send_gap(:,:,2) = floor(real(send_group_max-1)/N_proc(direction))
+    send_gap_abs(1) = minval(send_gap(:,:,1))
+    send_gap_abs(2) = maxval(send_gap(:,:,2))
+    ! -- Allocation --
+    max_size = 2 + gs(2)*(2+3*gs(1))
+    allocate(cartography(max_size,send_gap_abs(1):send_gap_abs(2)))
+    allocate(send_rank(send_gap_abs(1):send_gap_abs(2)))
+    ! -- Determine which processes communicate together --
+    call AC_remesh_determine_communication(direction, gs, ind_group, rece_gap, send_gap, send_gap_abs, send_rank, cartography)
+    ! -- allocate cartography about what I receive --
+    allocate(rece_carto(max_size,rece_gap(1,1):rece_gap(1,2)))
+
+    ! ===== Complete cartography and send range about the particles I remesh =====
+    allocate(s_request_range(send_gap_abs(1):send_gap_abs(2)))
+    min_size = 2 + gs(2)
+    do proc_gap = send_gap_abs(1), send_gap_abs(2)
+       cartography(1,proc_gap) = 0
+       ! Use the cartography to know which lines are concerned
+       com_size = cartography(2,proc_gap)
+       ! Range I want - store into the cartography
+       gap = proc_gap*N_proc(direction)
+       ! Position in cartography(:,proc_gap) of the current i1 indice
+       ind_for_i1 = min_size
+       do i2 = 1, gs(2)
+          do ind = ind_for_i1+1, ind_for_i1 + cartography(2+i2,proc_gap), 2
+             do i1 = cartography(ind,proc_gap), cartography(ind+1,proc_gap)
+                ! Interval starts at:
+                cartography(com_size+1,proc_gap) = max(send_group_min(i1,i2), gap+1) ! local mesh indices start at 1, hence gap+1
+                ! and ends at:
+                cartography(com_size+2,proc_gap) = min(send_group_max(i1,i2), gap+N_proc(direction))
+                ! update the number of elements to send
+                cartography(1,proc_gap) = cartography(1,proc_gap) &
+                     & + cartography(com_size+2,proc_gap) &
+                     & - cartography(com_size+1,proc_gap) + 1
+                com_size = com_size+2
+             end do
+          end do
+          ind_for_i1 = ind_for_i1 + cartography(2+i2,proc_gap)
+       end do
+       ! Tag = concatenation of (rank+1), ind_group(1), ind_group(2), direction and a unique Id.
+       tag = compute_tag(ind_group, tag_velo_range, direction, proc_gap)
+       ! Send message
+       if (send_rank(proc_gap) /= D_rank(direction)) then
+          call mpi_Isend(cartography(1,proc_gap), com_size, MPI_INTEGER, send_rank(proc_gap), tag, &
+               & D_comm(direction), s_request_range(proc_gap),ierr)
+       else
+          ! No communication needed, I copy it directly !
+          rece_carto(:,-proc_gap) = cartography(1,proc_gap)
+       end if
+    end do
+
+    ! ===== Initialize the general buffer =====
+    ! The same buffer is used to send data to all target processes. Its size
+    ! is the sum of the parts reserved to each process, and it has to be
+    ! split into one part per target process.
+    allocate(pos_in_buffer(send_gap_abs(1):send_gap_abs(2)))
+    pos_in_buffer(send_gap_abs(1)) = 1
+    do proc_gap = send_gap_abs(1), send_gap_abs(2)-1
+       pos_in_buffer(proc_gap+1)= pos_in_buffer(proc_gap) + cartography(1,proc_gap)
+    end do
+    allocate(send_buffer(pos_in_buffer(send_gap_abs(2)) &
+         & + cartography(1,send_gap_abs(2))))
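+    ! Editor's note - illustrative sketch (example values assumed): pos_in_buffer is an
+    ! exclusive prefix sum of the element counts stored in cartography(1,:). With three
+    ! consecutive proc_gap values sending 4, 7 and 5 elements, pos_in_buffer = 1, 5, 12
+    ! and each target process p owns the slice
+    ! send_buffer(pos_in_buffer(p) : pos_in_buffer(p)+cartography(1,p)-1).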
+
+    ! XXX OPTIM: start of optimization work in progress
+    !   ! -- Allocate the buffer use to remesh all the lines --
+    !   allocate(begin_proc(proc_min_abs:proc_max_abs))
+    !   begin_proc(proc_min_abs) = 1
+    !   do proc_gap = (proc_min_abs+1), proc_max_abs
+    !       begin_proc(proc_gap) = begin_proc(proc_gap-1) + size_per_proc(proc_gap)
+    !   end do
+    !   allocate(send_buffer(begin_proc(proc_max_abs) + size_per_proc(proc_max_abs)-1))
+    !   send_buffer = 0.0
+    ! XXX OPTIM: end of optimization work in progress
+
+    ! ===== Remeshing into the buffer by using pointer array =====
+    do i2 = 1, gs(2)
+       do i1 = 1, gs(1)
+          send_j_min = send_group_min(i1,i2)
+          send_j_max = send_group_max(i1,i2)
+
+          ! -- Allocate remesh_pter --
+          allocate(remesh_pter(send_j_min:send_j_max))
+          do ind = send_j_min, send_j_max
+             proc_gap = floor(real(ind-1)/N_proc(direction))
+             remesh_pter(ind)%pter => send_buffer(pos_in_buffer(proc_gap))
+             pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
+          end do
+
+          ! -- Remesh the particles in the buffer --
+          ! XXX TODO
+          ! overload AC_remesh so that it accepts an array of real_pter
+          call AC_remesh_lambda2corrected(direction, p_pos_adim(:,i1,i2), scal(:,j+i1-1,k+i2-1), &
+               & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, send_j_max, remesh_pter)
+
+          ! -- Send the buffer to the matching processus and update the scalar field --
+          scal(:,j+i1-1,k+i2-1) = 0
+          ! Communicate: send each process its part of the buffer and update the
+          ! scalar field using rece_carto
+          !            call AC_bufferToScalar(direction, ind_group , send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+          !                & rece_proc(:,i1,i2), send_buffer, scal(:,j+i1-1,k+i2-1))
+          ! XXX End TODO
+          deallocate(remesh_pter)
+        end do
+    end do
+    ! Deallocate all field
+    deallocate(send_buffer)
+ 
+  end subroutine Xremesh_O2_group
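+
+  ! Editor's note (illustrative, based on Xremesh_O2_group above): the real_pter array
+  ! implements "remesh directly into the send buffer". Each mesh index ind of the
+  ! remeshing range is associated with the buffer slot reserved for its destination
+  ! process,
+  !     remesh_pter(ind)%pter => send_buffer(pos_in_buffer(proc_gap))
+  ! The intent (see the XXX TODO above) is that the remeshing formula writes its
+  ! contributions straight into these per-destination slices, so no extra copy per
+  ! receiver is needed before the MPI exchange.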
+
+  !> remeshing with an order 4 method, corrected to allow large CFL number - untagged particles
+  !!    @param[in]        ind_group   = coordinate of the current group of lines
+  !!    @param[in]        gs          = size of groups (along X direction)
+  !!    @param[in]        p_pos_adim  = adimensioned particle positions
+  !!    @param[in]        bl_tag      = contains information about the block (is it tagged?)
+  !!    @param[in]        j,k         = indices of the current line (y-coordinate and z-coordinate)
+  !!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
+  !!    @param[in,out]    scal          = scalar field to advect
+  subroutine Xremesh_O4(ind_group, gs, p_pos_adim, bl_type, bl_tag,j,k,scal)
+
+    ! Input/Output
+    integer, dimension(2), intent(in)                                   :: ind_group
+    integer, dimension(2), intent(in)                                   :: gs
+    integer, intent(in)                                                 :: j, k
+    real(WP), dimension(N_proc(direction),gs(1),gs(2)), intent(in)      :: p_pos_adim   ! adimensionned particles position 
+    logical, dimension(bl_nb(direction)+1,gs(1),gs(2)), intent(in)      :: bl_type      ! is the particle block a center block or a left one ?
+    logical, dimension(bl_nb(direction),gs(1),gs(2)), intent(in)        :: bl_tag       ! indice of tagged particles
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal
+    ! Other local variables 
+    ! Variables used to remesh particles ...
+    ! ... and to communicate between subdomains. A variable prefixed by "send_" (resp. "rece_")
+    ! designates something I send (resp. I receive).
+    real(WP),dimension(:),allocatable   :: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain
+    integer, dimension(2,gs(1),gs(2))   :: rece_proc    ! minimal and maximal gap between my coordinate along the current direction
+    ! and the ones from which I will receive data
+    integer, dimension(gs(1),gs(2))     :: proc_min     ! smallest gap between me and the processes to which I send data
+    integer, dimension(gs(1),gs(2))     :: proc_max     ! largest gap between me and the processes to which I send data
+    integer                             :: i1, i2       ! indices of a line within the group
+
+    !  -- Compute ranges --
+    where (bl_type(1,:,:))
+       ! First particle is a centered one
+       send_group_min = nint(p_pos_adim(1,:,:))-2
+    elsewhere
+       ! First particle is a left one
+       send_group_min = floor(p_pos_adim(1,:,:))-2
+    end where
+    where (bl_type(N_proc(direction)/bl_size +1,:,:))
+       ! Last particle is a centered one
+       send_group_max = nint(p_pos_adim(N_proc(direction),:,:))+2
+    elsewhere
+       ! Last particle is a left one
+       send_group_max = floor(p_pos_adim(N_proc(direction),:,:))+2
+    end where
+
+    ! -- Determine the communications needed: who will communicate with whom? (i.e. compute senders and receivers) --
+    call AC_obtain_senders(direction, gs, ind_group, proc_min, proc_max, rece_proc)
+    !call AC_obtain_senders(direction, gs, ind_group, proc_min, proc_max, proc_min_abs, proc_max_abs, rece_proc)
+
+    do i2 = 1, gs(2)
+       do i1 = 1, gs(1)
+          send_j_min = send_group_min(i1,i2)
+          send_j_max = send_group_max(i1,i2)
+
+          ! -- Allocate buffer for remeshing of local particles --
+          allocate(send_buffer(send_j_min:send_j_max))
+          send_buffer = 0.0;
+
+          ! -- Remesh the particles in the buffer --
+          call AC_remesh_lambda4corrected(direction, p_pos_adim(:,i1,i2), scal(:,j+i1-1,k+i2-1), &
+               & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, send_j_max, send_buffer)
+
+          ! -- Send the buffer to the matching processus and update the scalar field --
+          scal(:,j+i1-1,k+i2-1) = 0
+          call AC_bufferToScalar(direction, ind_group, send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+               & rece_proc(:,i1,i2), send_buffer, scal(:,j+i1-1,k+i2-1))
+
+          ! Deallocate all field
+          deallocate(send_buffer)
+
+       end do
+    end do
+
+  end subroutine Xremesh_O4
+
+
+  !> remeshing with the M'6 formula - no tag nor correction for large time steps.
+  !!    @param[in]        ind_group   = coordinate of the current group of lines
+  !!    @param[in]        gs          = size of groups (along X direction)
+  !!    @param[in]        p_pos_adim  = adimensioned particle positions
+  !!    @param[in]        j,k         = indices of the current line (y-coordinate and z-coordinate)
+  !!    @param[in,out]    scal        = scalar field to advect
+  subroutine Xremesh_Mprime6(ind_group, gs, p_pos_adim, j,k,scal)
+
+    ! Input/output
+    integer, dimension(2), intent(in)                                   :: ind_group
+    integer, dimension(2), intent(in)                                   :: gs
+    integer, intent(in)                                                 :: j, k
+    real(WP), dimension(N_proc(direction),gs(1),gs(2)), intent(in)      :: p_pos_adim   ! adimensionned particles position 
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal
+    ! Other local variables 
+    ! Variables used to remesh particles ...
+    ! ... and to communicate between subdomains. A variable prefixed by "send_" (resp. "rece_")
+    ! designates something I send (resp. I receive).
+    real(WP),dimension(:),allocatable   :: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain
+    integer, dimension(2,gs(1),gs(2))   :: rece_proc    ! minimal and maximal gap between my coordinate along the current direction
+    ! and the ones from which I will receive data
+    integer, dimension(gs(1),gs(2))     :: proc_min     ! smallest gap between me and the processes to which I send data
+    integer, dimension(gs(1),gs(2))     :: proc_max     ! largest gap between me and the processes to which I send data
+    integer                             :: i1, i2       ! indices of a line within the group
+    integer                             :: i            ! index of the current particle
+
+    !  -- Compute the remeshing domain --
+    send_group_min = floor(p_pos_adim(1,:,:)-2)
+    send_group_max = floor(p_pos_adim(N_proc(direction),:,:)+3)
+
+    ! -- Determine the communications needed: who will communicate with whom? (i.e. compute senders and receivers) --
+    call AC_obtain_senders(direction, gs, ind_group, proc_min, proc_max, rece_proc, 1)
+
+    do i2 = 1, gs(2)
+       do i1 = 1, gs(1)
+          send_j_min = send_group_min(i1,i2)
+          send_j_max = send_group_max(i1,i2)
+
+          ! -- Allocate buffer for remeshing of local particles --
+          allocate(send_buffer(send_j_min:send_j_max))
+          send_buffer = 0.0;
+
+          ! -- Remesh the particles in the buffer --
+          do i = 1, N_proc(direction), 1
+             call AC_remesh_Mprime6(p_pos_adim(i,i1,i2),scal(i,j+i1-1,k+i2-1), send_buffer)
+          end do
+
+          ! -- Send the buffer to the matching processus and update the scalar field --
+          scal(:,j+i1-1,k+i2-1) = 0
+          call AC_bufferToScalar(direction, ind_group, send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+               & rece_proc(:,i1,i2), send_buffer, scal(:,j+i1-1,k+i2-1))
+
+          ! Deallocate all field
+          deallocate(send_buffer)
+
+       end do
+    end do
+
+  end subroutine Xremesh_Mprime6
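+
+  ! Editor's note - summary of the remeshing supports used above (read from the code,
+  ! kept here as a reminder):
+  !   * lambda-2 corrected (Xremesh_O2, Xremesh_O2_group): [first-1, last+1]
+  !   * lambda-4 corrected (Xremesh_O4)                  : [first-2, last+2]
+  !   * M'6 (Xremesh_Mprime6, no tag/correction)         : [floor(first-2), floor(last+3)]
+  ! where "first"/"last" are the (rounded) adimensioned positions of the first and last
+  ! particles of the line; higher-order kernels simply need a wider halo.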
+
+
+  ! ====================================================================
+  ! ====================    Initialize particle     ====================
+  ! ====================================================================
+
+  !> Creation and initialisation of a particle line (ie Y and Z coordinate are fixed)
+  !!    @param[in]    Vx          = 3D velocity field
+  !!    @param[in]    j           = Y-indice of the current line
+  !!    @param[in]    k           = Z-indice of the current line
+  !!    @param[out]   p_pos_adim  = adimensioned particle positions
+  !!    @param[out]   p_V         = particle velocity 
+  subroutine advecX_init_line(Vx, j, k, p_pos_adim, p_V)
+
+    ! Input/Output
+    integer, intent(in)                                 :: j,k
+    real(WP), dimension(N_proc(direction)), intent(out) :: p_pos_adim, p_V
+    real(WP), dimension(:,:,:), intent(in)              :: Vx
+    ! Other local variables
+    integer                                     :: ind          ! indice
+
+    do ind = 1, N_proc(direction)
+       p_pos_adim(ind) = ind
+       p_V(ind)        = Vx(ind,j,k)
+    end do
+
+  end subroutine advecX_init_line
+
+
+  !> Creation and initialisation of a group of particle line
+  !!    @param[in]    Vx          = 3D velocity field
+  !!    @param[in]    j           = Y-indice of the current line
+  !!    @param[in]    k           = Z-indice of the current line
+  !!    @param[in]    Gsize       = size of groups (along X direction)
+  !!    @param[out]   p_pos_adim  = adimensioned particle positions
+  !!    @param[out]   p_V         = particle velocity 
+  subroutine advecX_init_group(Vx, j, k, Gsize, p_pos_adim, p_V)
+
+    ! Input/Output
+    integer, intent(in)                                                     :: j,k
+    integer, dimension(2), intent(in)                                       :: Gsize
+    real(WP), dimension(N_proc(direction),Gsize(1),Gsize(2)),intent(out)    :: p_pos_adim, p_V
+    real(WP), dimension(:,:,:), intent(in)                                  :: Vx
+    ! Other local variables
+    integer                                     :: ind          ! indice
+    integer                                     :: j_gp, k_gp   ! Y and Z indice of the current line in the group
+
+    do k_gp = 1, Gsize(2)
+       do j_gp = 1, Gsize(1)
+          do ind = 1, N_proc(direction)
+             p_pos_adim(ind, j_gp, k_gp) = ind
+             p_V(ind, j_gp, k_gp)        = Vx(ind,j+j_gp-1,k+k_gp-1)
+          end do
+       end do
+    end do
+
+  end subroutine advecX_init_group
+
+end module advecX
+!> @}
diff --git a/HySoP/src/scalesInterface/particles/advecY.f90 b/HySoP/src/scalesInterface/particles/advecY.f90
new file mode 100644
index 000000000..62ebd56ef
--- /dev/null
+++ b/HySoP/src/scalesInterface/particles/advecY.f90
@@ -0,0 +1,645 @@
+!> @addtogroup part
+!! @{
+
+!------------------------------------------------------------------------------
+!
+! MODULE: advecY
+!
+!
+! DESCRIPTION: 
+!> The module advecY is devoted to the advection along the Y axis of a scalar field.
+!! It uses a particle method and provides a parallel implementation.
+!
+!> @details
+!!     This module is a part of the advection solver based on a particle method.
+!! The solver uses dimensional splitting and this module contains all the
+!! methods used to solve advection along the Y-axis. This is a parallel
+!! implementation using MPI and the cartesian topology it provides.
+!!
+!! This module can use the methods and variables defined in the module
+!! "advec_common" which gathers information and tools shared for advection along
+!! the x, y and z-axes.
+!!
+!! The module "test_advec" can be used in order to validate the procedures
+!! embedded in this module.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advecY
+
+  use precision
+  use advec_remeshing_formula
+
+  implicit none
+
+  ! ===== Public procedures =====
+  !> Generic procedure to advect the scalar with a particle solver
+  public                  :: advecY_calc
+  !----- (corrected) Remeshing methods (these methods are set to public for validation purposes) -----
+  public                  :: Yremesh_O2       ! order 2
+  public                  :: Yremesh_O4       ! order 4
+  ! ----- Other remeshing formula -----
+  public                  :: Yremesh_Mprime6  
+
+  ! ===== Private procedures =====
+  !> particles solver with remeshing method at order 2
+  private                 :: advecY_calc_line     ! remeshing method at order 2
+  private                 :: advecY_calc_group    ! remeshing method at order 2 - for a group of lines
+  private                 :: advecY_calc_Mprime6  ! M'6 remeshing method
+  ! Particles initialisation
+  private                 :: advecY_init      ! generic initialization of particle position, velocity.
+  private                 :: advecY_init_line ! initialisation for only one line of particles
+  private                 :: advecY_init_group! initialisation for a group of line of particles
+
+  ! ===== Private variables =====
+  !> current direction = along Y (to avoid redefinition and to make cut/paste easier)
+  integer, parameter, private      :: direction = 2
+
+  interface advecY_init
+     module procedure advecY_init_line, advecY_init_group
+  end interface advecY_init
+
+contains
+
+
+  ! #####################################################################################
+  ! #####                                                                           #####
+  ! #####                         Public procedure                                  #####
+  ! #####                                                                           #####
+  ! #####################################################################################
+
+  ! ====================================================================
+  ! ====================    Generic solver          ====================
+  ! ====================================================================
+
+  !> Scalar advection (this procedure calls the right solver, depending on the simulation setup)
+  !!    @param[in]        dt          = time step
+  !!    @param[in]        Vy          = velocity along y (could be discretised on a bigger mesh than the scalar)
+  !!    @param[in,out]    SC          = scalar field to advect
+  !!    @param[in]        type_solver = scheme used for the advection (particles with order 2 or 4)
+  subroutine advecY_calc(dt, Vy, SC, type_solver)
+
+    ! Input/Output
+    real(WP), intent(in)                                                :: dt
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vy
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: SC
+    character(len=*), intent(in)                                        :: type_solver
+
+    ! Allocate send_ind_min/max
+    if(allocated(send_group_min)) deallocate(send_group_min)
+    allocate(send_group_min(group_size(direction,1),group_size(direction,2)))
+    if(allocated(send_group_max)) deallocate(send_group_max)
+    allocate(send_group_max(group_size(direction,1),group_size(direction,2)))
+
+    ! Call the right solver, depending on the space order we want.
+    select case(type_solver)
+    case('p_O2')
+       call advecY_calc_group(dt, Vy, SC, group_size(direction,:))
+       !call advecY_calc_line(dt, Vy, SC)
+    case('p_O4')
+       call advecY_calc_O4(dt, Vy, SC, group_size(direction,:))
+    case('p_M6')
+       call advecY_calc_Mprime6(dt, Vy, SC, group_size(direction,:))
+    case default
+       call advecY_calc_group(dt, Vy, SC,group_size(direction,:))
+    end select
+
+
+  end subroutine advecY_calc
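+
+  ! Editor's note - usage sketch (illustrative only; dt, Vy and scal are assumed to be
+  ! already distributed with the local shape N_proc(1) x N_proc(2) x N_proc(3)):
+  !     call advecY_calc(dt, Vy, scal, 'p_O2')   ! order-2 corrected remeshing (group version)
+  !     call advecY_calc(dt, Vy, scal, 'p_O4')   ! order-4 corrected remeshing
+  !     call advecY_calc(dt, Vy, scal, 'p_M6')   ! M'6 remeshing, no correction
+  ! Any other value of type_solver falls back to the order-2 group solver.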
+
+
+  ! #####################################################################################
+  ! #####                                                                           #####
+  ! #####                         Private procedure                                 #####
+  ! #####                                                                           #####
+  ! #####################################################################################
+
+  ! ====================================================================
+  ! ====================    Different solvers       ====================
+  ! ====================================================================
+
+
+  !> Advection during a time step dt - order 2
+  !!    @param[in]        dt      = time step
+  !!    @param[in]        Vy      = velocity along y (could be discretised on a bigger mesh than the scalar)
+  !!    @param[in,out]    scal3D   = scalar field to advect
+  subroutine advecY_calc_line(dt,Vy,scal3D)
+
+    ! input/output
+    real(WP), intent(in)                                                :: dt
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vy
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal3D
+    ! other local variables
+    integer                                 :: i,k          ! indices of the current mesh point
+    integer, dimension(direction)           :: ind_group    ! index of the current group of lines ((i,k) by default)
+    real(WP), dimension(N_proc(direction))  :: p_pos_adim   ! adimensionned particles position 
+    real(WP), dimension(N_proc(direction))  :: p_V          ! particles velocity 
+    logical, dimension(bl_nb(direction)+1)  :: bl_type      ! is the particle block a center block or a left one ?
+    logical, dimension(bl_nb(direction))    :: bl_tag       ! indice of tagged particles
+
+    ind_group = 0
+    do k = 1, N_proc(3)
+       ind_group(2) = ind_group(2) + 1
+       ind_group(1) = 0
+       do i = 1, N_proc(1)
+          ind_group(1) = ind_group(1) + 1
+
+          ! ===== Init particles =====
+          call advecY_init(Vy, i, k, p_pos_adim, p_V)
+
+          ! ===== Advection =====
+          ! -- Compute velocity (with a RK2 scheme) --
+          call AC_particle_velocity(dt, direction, ind_group, p_pos_adim, p_V)
+          ! -- Advec particles --
+          p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+
+          ! ===== Remeshing =====
+          ! -- Pre-Remeshing: Determine blocks type and tag particles --
+          call AC_type_and_block(dt, direction, ind_group, p_V, bl_type, bl_tag)
+          ! -- Remeshing --
+          call Yremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,i,k,scal3D)
+       end do
+    end do
+
+
+  end subroutine advecY_calc_line
+
+
+  !> Advection during a time step dt - order 2
+  !!    @param[in]        dt      = time step
+  !!    @param[in]        Vy      = velocity along y (could be discretised on a bigger mesh than the scalar)
+  !!    @param[in,out]    scal3D  = scalar field to advect
+  !!    @param[in]        gs      = size of the group along the current direction (i.e. along the Y-axis)
+  subroutine advecY_calc_group(dt,Vy,scal3D,gs)
+
+    ! input/output
+    real(WP), intent(in)                                                :: dt
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vy
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal3D
+    integer, dimension(2), intent(in)                                   :: gs   ! size of lines group along Y
+    ! Other local variables
+    integer                                             :: i,k          ! indices of the current mesh point
+    integer, dimension(2)                               :: ind_group    ! index of the current group of lines
+    real(WP), dimension(N_proc(direction),gs(1),gs(2))  :: p_pos_adim   ! adimensionned particles position 
+    real(WP), dimension(N_proc(direction),gs(1),gs(2))  :: p_V          ! particles velocity 
+    logical, dimension(bl_nb(direction)+1,gs(1),gs(2))  :: bl_type      ! is the particle block a center block or a left one ?
+    logical, dimension(bl_nb(direction),gs(1),gs(2))    :: bl_tag       ! indice of tagged particles
+
+    ind_group = 0
+
+    do k = 1, N_proc(3), gs(2)
+       ind_group(2) = ind_group(2) + 1
+       ind_group(1) = 0
+       do i = 1, N_proc(1), gs(1)
+          ind_group(1) = ind_group(1) + 1
+
+          ! ===== Init particles =====
+          call advecY_init(Vy, i, k, gs, p_pos_adim, p_V)
+
+          ! ===== Advection =====
+          ! -- Compute velocity (with a RK2 scheme) --
+          call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
+          ! -- Advec particles --
+          p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+
+          ! ===== Remeshing =====
+          ! -- Pre-Remeshing: Determine blocks type and tag particles --
+          call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
+          ! -- Remeshing --
+          call Yremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag, i,k,scal3D)
+
+       end do
+    end do
+
+  end subroutine advecY_calc_group
+
+
+  !> Advection during a time step dt - order 4
+  !!    @param[in]        dt      = time step
+  !!    @param[in]        Vy      = velocity along y (could be discretised on a bigger mesh than the scalar)
+  !!    @param[in,out]    scal3D  = scalar field to advect
+  !!    @param[in]        gs      = size of the group along the current direction (i.e. along the Y-axis)
+  subroutine advecY_calc_O4(dt,Vy,scal3D,gs)
+
+    ! input/output
+    real(WP), intent(in)                                                :: dt
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vy
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal3D
+    integer, dimension(2), intent(in)                                   :: gs   ! size of lines group along Y
+    ! Other local variables
+    integer                                             :: i,k          ! indices of the current mesh point
+    integer, dimension(2)                               :: ind_group    ! index of the current group of lines
+    real(WP), dimension(N_proc(direction),gs(1),gs(2))  :: p_pos_adim   ! adimensionned particles position 
+    real(WP), dimension(N_proc(direction),gs(1),gs(2))  :: p_V          ! particles velocity 
+    logical, dimension(bl_nb(direction)+1,gs(1),gs(2))  :: bl_type      ! is the particle block a center block or a left one ?
+    logical, dimension(bl_nb(direction),gs(1),gs(2))    :: bl_tag       ! indice of tagged particles
+
+    ind_group = 0
+
+    do k = 1, N_proc(3), gs(2)
+       ind_group(2) = ind_group(2) + 1
+       ind_group(1) = 0
+       do i = 1, N_proc(1), gs(1)
+          ind_group(1) = ind_group(1) + 1
+
+          ! ===== Init particles =====
+          call advecY_init(Vy, i, k, gs, p_pos_adim, p_V)
+
+          ! ===== Advection =====
+          ! -- Compute velocity (with a RK2 scheme) --
+          call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
+          ! -- Advec particles --
+          p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+
+          ! ===== Remeshing =====
+          ! -- Pre-Remeshing: Determine blocks type and tag particles --
+          call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
+          ! -- Remeshing --
+          call Yremesh_O4(ind_group, gs, p_pos_adim, bl_type, bl_tag, i,k,scal3D)
+
+       end do
+    end do
+
+  end subroutine advecY_calc_O4
+
+
+  !> Advection during a time step dt - M'6, method without corrections.
+  !!    @param[in]        dt      = time step
+  !!    @param[in]        Vy      = velocity along y (could be discretised on a bigger mesh than the scalar)
+  !!    @param[in,out]    scal3D  = scalar field to advect
+  !!    @param[in]        gs      = size of the group along the current direction (i.e. along the Y-axis)
+  subroutine advecY_calc_Mprime6(dt,Vy,scal3D,gs)
+
+    ! input/output
+    real(WP), intent(in)                                                :: dt
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vy
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal3D
+    integer, dimension(2), intent(in)                                   :: gs   ! size of lines group along Y
+    ! Other local variables
+    integer                                             :: i,k          ! indices of the current mesh point
+    integer, dimension(2)                               :: ind_group    ! index of the current group of lines
+    real(WP), dimension(N_proc(direction),gs(1),gs(2))  :: p_pos_adim   ! adimensionned particles position 
+    real(WP), dimension(N_proc(direction),gs(1),gs(2))  :: p_V          ! particles velocity 
+
+    ind_group = 0
+
+    do k = 1, N_proc(3), gs(2)
+       ind_group(2) = ind_group(2) + 1
+       ind_group(1) = 0
+       do i = 1, N_proc(1), gs(1)
+          ind_group(1) = ind_group(1) + 1
+
+          ! ===== Init particles =====
+          call advecY_init(Vy, i, k, gs, p_pos_adim, p_V)
+
+          ! ===== Advection =====
+          ! -- Compute velocity (with a RK2 scheme) --
+          call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
+          ! -- Advec particles --
+          p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+
+          ! ===== Remeshing =====
+          ! -- Remeshing --
+          call Yremesh_Mprime6(ind_group, gs, p_pos_adim, i,k,scal3D)
+
+       end do
+    end do
+
+  end subroutine advecY_calc_Mprime6
+
+
+  ! ====================================================================
+  ! ====================   Remeshing subroutines    ====================
+  ! ====================================================================
+
+  !> remeshing with an order 2 method, corrected to allow large CFL number - untagged particles
+  !!    @param[in]        ind_group   = coordinate of the current group of lines
+  !!    @param[in]        p_pos_adim  = adimensioned particle positions
+  !!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
+  !!    @param[in]        bl_tag      = contains information about the block (is it tagged?)
+  !!    @param[in]        i,k         = indices of the current line (x-coordinate and z-coordinate)
+  !!    @param[in,out]    scal        = scalar field to advect
+  subroutine Yremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,i,k,scal)
+
+    ! Input/Output
+    integer, dimension(2), intent(in)                                   :: ind_group
+    integer, intent(in)                                                 :: i, k
+    logical, dimension(:), intent(in)                                   :: bl_type
+    logical, dimension(:), intent(in)                                   :: bl_tag
+    real(WP), dimension(:), intent(in)                                  :: p_pos_adim
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal
+    ! Other local variables 
+    real(WP),dimension(:),allocatable   :: send_buffer  ! buffer used to remesh the scalar before sending it
+    ! to the right subdomain
+    integer, dimension(2)               :: rece_proc    ! minimal and maximal gap between my Y-coordinate and the one from which
+    ! I will receive data
+    integer                             :: proc_min     ! smallest gap between me and the processes to which I send data
+    integer                             :: proc_max     ! largest gap between me and the processes to which I send data
+
+    !  -- Compute ranges for remeshing of local particles --
+    if (bl_type(1)) then
+       ! First particle is a centered one
+       send_j_min = nint(p_pos_adim(1))-1
+    else
+       ! First particle is a left one
+       send_j_min = floor(p_pos_adim(1))-1
+    end if
+    if (bl_type(N_proc(direction)/bl_size +1)) then
+       ! Last particle is a centered one
+       send_j_max = nint(p_pos_adim(N_proc(direction)))+1
+    else
+       ! Last particle is a left one
+       send_j_max = floor(p_pos_adim(N_proc(direction)))+1
+    end if
+
+    ! -- Determine the communications needed: who will communicate with whom? (i.e. compute senders and receivers) --
+    call AC_obtain_senders_line(send_j_min, send_j_max, direction, ind_group, proc_min, proc_max, rece_proc)
+
+    ! -- Allocate buffer for remeshing of local particles --
+    allocate(send_buffer(send_j_min:send_j_max))
+    send_buffer = 0.0;
+
+    ! -- Remesh the particles in the buffer --
+    call AC_remesh_lambda2corrected(direction, p_pos_adim, scal(i,:,k), bl_type, bl_tag, send_j_min, send_j_max, send_buffer)
+
+    ! -- Send the buffer to the matching processus and update the scalar field --
+    scal(i,:,k) = 0
+    call AC_bufferToScalar(direction, ind_group , send_j_min, send_j_max, proc_min, proc_max, rece_proc, send_buffer, scal(i,:,k))
+
+    ! -- Deallocate all field --
+    deallocate(send_buffer)
+
+  end subroutine Yremesh_O2
+
+
+  !> remeshing with an order 2 method, corrected to allow large CFL number - group version
+  !!    @param[in]        ind_group   = coordinate of the current group of lines
+  !!    @param[in]        gs          = size of groups (along X direction)
+  !!    @param[in]        p_pos_adim  = adimensioned particle positions
+  !!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
+  !!    @param[in]        bl_tag      = contains information about the block (is it tagged?)
+  !!    @param[in]        i,k         = indices of the current line (x-coordinate and z-coordinate)
+  !!    @param[in,out]    scal        = scalar field to advect
+  subroutine Yremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag,i,k,scal)
+
+    ! Input/Output
+    integer, dimension(2), intent(in)                                   :: ind_group
+    integer, dimension(2), intent(in)                                   :: gs
+    integer, intent(in)                                                 :: i, k
+    real(WP), dimension(N_proc(direction),gs(1),gs(2)), intent(in)      :: p_pos_adim   ! adimensionned particles position 
+    logical, dimension(bl_nb(direction)+1,gs(1),gs(2)), intent(in)      :: bl_type      ! is the particle block a center block or a left one ?
+    logical, dimension(bl_nb(direction),gs(1),gs(2)), intent(in)        :: bl_tag       ! indice of tagged particles
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal
+    ! Other local variables 
+    ! Variable used to remesh particles in a buffer
+    real(WP),dimension(:),allocatable   :: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain
+    integer, dimension(2,gs(1),gs(2))   :: rece_proc    ! minimal and maximal gap between my Y-coordinate and the one from which
+    ! I will receive data
+    integer, dimension(gs(1),gs(2))     :: proc_min     ! smallest gap between me and the processes to which I send data
+    integer, dimension(gs(1),gs(2))     :: proc_max     ! largest gap between me and the processes to which I send data
+
+    integer                             :: i1, i2       ! indice of a line into the group
+
+    !  -- Compute ranges --
+    where (bl_type(1,:,:))
+       ! First particle is a centered one
+       send_group_min = nint(p_pos_adim(1,:,:))-1
+    elsewhere
+       ! First particle is a left one
+       send_group_min = floor(p_pos_adim(1,:,:))-1
+    end where
+    where (bl_type(N_proc(direction)/bl_size +1,:,:))
+       ! Last particle is a centered one
+       send_group_max = nint(p_pos_adim(N_proc(direction),:,:))+1
+    elsewhere
+       ! Last particle is a left one
+       send_group_max = floor(p_pos_adim(N_proc(direction),:,:))+1
+    end where
+
+    ! -- Determine the communications needed: who will communicate with whom? (i.e. compute senders and receivers) --
+    call AC_obtain_senders(direction, gs, ind_group, proc_min, proc_max, rece_proc)
+
+    do i2 = 1, gs(2)
+       do i1 = 1, gs(1)
+          send_j_min = send_group_min(i1,i2)
+          send_j_max = send_group_max(i1,i2)
+
+          ! -- Allocate buffer for remeshing of local particles --
+          allocate(send_buffer(send_j_min:send_j_max))
+          send_buffer = 0.0;
+
+          ! -- Remesh the particles in the buffer --
+          call AC_remesh_lambda2corrected(direction, p_pos_adim(:,i1,i2), scal(i+i1-1,:,k+i2-1), &
+               & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, send_j_max, send_buffer)
+
+          ! -- Send the buffer to the matching processus and update the scalar field --
+          scal(i+i1-1,:,k+i2-1) = 0
+          call AC_bufferToScalar(direction, ind_group , send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+               & rece_proc(:,i1,i2), send_buffer, scal(i+i1-1,:,k+i2-1))
+
+          ! Deallocate all field
+          deallocate(send_buffer)
+
+       end do
+    end do
+
+  end subroutine Yremesh_O2_group
+
+
+  !> remeshing with an order 4 method, corrected to allow large CFL number - untagged particles
+  !!    @param[in]        ind_group   = coordinate of the current group of lines
+  !!    @param[in]        gs          = size of groups (along X direction)
+  !!    @param[in]        p_pos_adim  = adimensioned particle positions
+  !!    @param[in]        bl_tag      = contains information about the block (is it tagged?)
+  !!    @param[in]        i,k         = indices of the current line (x-coordinate and z-coordinate)
+  !!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
+  !!    @param[in,out]    scal        = scalar field to advect
+  subroutine Yremesh_O4(ind_group, gs, p_pos_adim, bl_type, bl_tag,i,k,scal)
+
+    ! input/output
+    integer, dimension(2), intent(in)                                   :: ind_group
+    integer, dimension(2), intent(in)                                   :: gs
+    integer, intent(in)                                                 :: i, k
+    real(WP), dimension(N_proc(direction),gs(1),gs(2)), intent(in)      :: p_pos_adim   ! adimensionned particles position 
+    logical, dimension(bl_nb(direction)+1,gs(1),gs(2)), intent(in)      :: bl_type      ! is the particle block a center block or a left one ?
+    logical, dimension(bl_nb(direction),gs(1),gs(2)), intent(in)        :: bl_tag       ! indice of tagged particles
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal
+    ! Other local variables 
+    ! Variables used to remesh particles ...
+    ! ... and to communicate between subdomains. A variable prefixed by "send_" (resp. "rece_")
+    ! designates something I send (resp. I receive).
+    real(WP),dimension(:),allocatable   :: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain
+    integer, dimension(2,gs(1),gs(2))   :: rece_proc    ! minimal and maximal gap between my Y-coordinate and the one from which
+    ! I will receive data
+    integer, dimension(gs(1),gs(2))     :: proc_min     ! smallest gap between me and the processes to which I send data
+    integer, dimension(gs(1),gs(2))     :: proc_max     ! largest gap between me and the processes to which I send data
+    integer                             :: i1, i2       ! indices of a line within the group
+
+    !  -- Compute ranges --
+    where (bl_type(1,:,:))
+       ! First particle is a centered one
+       send_group_min = nint(p_pos_adim(1,:,:))-2
+    elsewhere
+       ! First particle is a left one
+       send_group_min = floor(p_pos_adim(1,:,:))-2
+    end where
+    where (bl_type(N_proc(direction)/bl_size +1,:,:))
+       ! Last particle is a centered one
+       send_group_max = nint(p_pos_adim(N_proc(direction),:,:))+2
+    elsewhere
+       ! Last particle is a left one
+       send_group_max = floor(p_pos_adim(N_proc(direction),:,:))+2
+    end where
+
+    ! -- Determine the communications needed: who will communicate with whom? (i.e. compute senders and receivers) --
+    call AC_obtain_senders(direction, gs, ind_group, proc_min, proc_max, rece_proc)
+
+    do i2 = 1, gs(2)
+       do i1 = 1, gs(1)
+          send_j_min = send_group_min(i1,i2)
+          send_j_max = send_group_max(i1,i2)
+
+          ! -- Allocate buffer for remeshing of local particles --
+          allocate(send_buffer(send_j_min:send_j_max))
+          send_buffer = 0.0;
+
+          ! -- Remesh the particles in the buffer --
+          call AC_remesh_lambda4corrected(direction, p_pos_adim(:,i1,i2), scal(i+i1-1,:,k+i2-1), &
+               & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, send_j_max, send_buffer)
+
+          ! -- Send the buffer to the matching processus and update the scalar field --
+          scal(i+i1-1,:,k+i2-1) = 0
+          call AC_bufferToScalar(direction, ind_group, send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+               & rece_proc(:,i1,i2), send_buffer, scal(i+i1-1,:,k+i2-1))
+
+          ! Deallocate all field
+          deallocate(send_buffer)
+
+       end do
+    end do
+
+  end subroutine Yremesh_O4
+
+
+  !> remeshing with the M'6 formula - no tag nor correction for large time steps.
+  !!    @param[in]        ind_group   = coordinate of the current group of lines
+  !!    @param[in]        gs          = size of groups (along X direction)
+  !!    @param[in]        p_pos_adim  = adimensioned particle positions
+  !!    @param[in]        i,k         = indices of the current line (x-coordinate and z-coordinate)
+  !!    @param[in,out]    scal        = scalar field to advect
+  subroutine Yremesh_Mprime6(ind_group, gs, p_pos_adim, i,k,scal)
+
+    ! input/output
+    integer, dimension(2), intent(in)                                   :: ind_group
+    integer, dimension(2), intent(in)                                   :: gs
+    integer, intent(in)                                                 :: i, k
+    real(WP), dimension(N_proc(direction),gs(1),gs(2)), intent(in)      :: p_pos_adim   ! adimensionned particles position 
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal
+    ! Other local variables 
+    ! Variables used to remesh particles ...
+    ! ... and to communicate between subdomains. A variable prefixed by "send_" (resp. "rece_")
+    ! designates something I send (resp. I receive).
+    real(WP),dimension(:),allocatable   :: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain
+    integer, dimension(2,gs(1),gs(2))   :: rece_proc    ! minimal and maximal gap between my Y-coordinate and the one from which
+    ! I will receive data
+    integer, dimension(gs(1),gs(2))     :: proc_min     ! smallest gap between me and the processes to which I send data
+    integer, dimension(gs(1),gs(2))     :: proc_max     ! largest gap between me and the processes to which I send data
+    integer                             :: i1, i2       ! indices of a line within the group
+    integer                             :: ind_p        ! index of the current particle
+
+    !  -- Compute the remeshing domain --
+    send_group_min = floor(p_pos_adim(1,:,:)-2)
+    send_group_max = floor(p_pos_adim(N_proc(direction),:,:)+3)
+
+    ! -- Determine the communications needed: who will communicate with whom? (i.e. compute senders and receivers) --
+    call AC_obtain_senders(direction, gs, ind_group, proc_min, proc_max, rece_proc, 1)
+
+    do i2 = 1, gs(2)
+       do i1 = 1, gs(1)
+          send_j_min = send_group_min(i1,i2)
+          send_j_max = send_group_max(i1,i2)
+
+          !  -- Allocate and initialize the buffer --
+          allocate(send_buffer(send_j_min:send_j_max))
+          send_buffer = 0.0;
+
+          ! -- Remesh the particles in the buffer --
+          do ind_p = 1, N_proc(direction), 1
+             call AC_remesh_Mprime6(p_pos_adim(ind_p,i1,i2),scal(i+i1-1,ind_p,k+i2-1), send_buffer)
+          end do
+
+          ! -- Send the buffer to the matching processus and update the scalar field --
+          scal(i+i1-1,:,k+i2-1) = 0
+          call AC_bufferToScalar(direction, ind_group, send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+               & rece_proc(:,i1,i2), send_buffer, scal(i+i1-1,:,k+i2-1))
+
+          ! Deallocate all field
+          deallocate(send_buffer)
+
+       end do
+    end do
+
+  end subroutine Yremesh_Mprime6
+
+
+  ! ====================================================================
+  ! ====================    Initialize particle     ====================
+  ! ====================================================================
+
+  !> Creation and initialisation of a particle line (ie X and Z coordinate are fixed)
+  !!    @param[in]    Vy          = 3D velocity field
+  !!    @param[in]    i           = X-indice of the current line
+  !!    @param[in]    k           = Z-indice of the current line
+  !!    @param[out]   p_pos_adim  = adimensioned particle positions
+  !!    @param[out]   p_V         = particle velocity 
+  subroutine advecY_init_line(Vy, i, k, p_pos_adim, p_V)
+
+    ! input/output
+    integer, intent(in)                                 :: i,k
+    real(WP), dimension(N_proc(direction)), intent(out) :: p_pos_adim, p_V
+    real(WP), dimension(:,:,:), intent(in)              :: Vy
+    ! Other local variables
+    integer                                     :: ind          ! indice
+
+    do ind = 1, N_proc(direction)
+       p_pos_adim(ind) = ind
+       p_V(ind)        = Vy(i,ind,k)
+    end do
+
+  end subroutine advecY_init_line
+
+
+  !> Creation and initialisation of a group of particle line
+  !!    @param[in]    Vy          = 3D velocity field
+  !!    @param[in]    i           = X-indice of the current line
+  !!    @param[in]    k           = Z-indice of the current line
+  !!    @param[in]    Gsize       = size of groups (along Y direction)
+  !!    @param[out]   p_pos_adim  = adimensioned particle positions
+  !!    @param[out]   p_V         = particle velocity 
+  subroutine advecY_init_group(Vy, i, k, Gsize, p_pos_adim, p_V)
+
+    ! input/output
+    integer, intent(in)                                                     :: i,k
+    integer, dimension(2), intent(in)                                       :: Gsize
+    real(WP), dimension(N_proc(direction),Gsize(1),Gsize(2)),intent(out)    :: p_pos_adim, p_V
+    real(WP), dimension(:,:,:), intent(in)                                  :: Vy
+    ! Other local variables
+    integer                                     :: ind          ! indice
+    integer                                     :: i_gp, k_gp   ! X and Z indices of the current line in the group
+
+    do k_gp = 1, Gsize(2)
+       do i_gp = 1, Gsize(1)
+          do ind = 1, N_proc(direction)
+             p_pos_adim(ind, i_gp, k_gp) = ind
+             p_V(ind, i_gp, k_gp)        = Vy(i+i_gp-1,ind,k+k_gp-1)
+          end do
+       end do
+    end do
+
+  end subroutine advecY_init_group
+
+end module advecY
+!> @}
diff --git a/HySoP/src/scalesInterface/particles/advecZ.f90 b/HySoP/src/scalesInterface/particles/advecZ.f90
new file mode 100644
index 000000000..8bbf5f4a9
--- /dev/null
+++ b/HySoP/src/scalesInterface/particles/advecZ.f90
@@ -0,0 +1,657 @@
+!> @addtogroup part
+!! @{
+
+!------------------------------------------------------------------------------
+!
+! MODULE: advecZ
+!
+!
+! DESCRIPTION: 
+!> The module advecZ is devoted to the advection along the Z axis of a scalar field.
+!! It uses a particle method and provides a parallel implementation.
+!
+!> @details
+!! This module is a part of the advection solver based on a particle method.
+!! The solver uses dimensional splitting and this module contains all the
+!! methods used to solve advection along the Z-axis. This is a parallel
+!! implementation using MPI and the cartesian topology it provides.
+!!
+!! This module can use the methods and variables defined in the module
+!! "advec_common" which gathers information and tools shared for advection along
+!! the x, y and z-axes.
+!!
+!! The module "test_advec" can be used in order to validate the procedures
+!! embedded in this module.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advecZ
+
+  use precision
+  use advec_remeshing_formula
+
+  implicit none
+
+  ! ===== Private variables =====
+  ! Minimal and maximal indices of the buffer used in the different communications
+  !> minimal index of the send buffer
+  !integer, public         :: send_j_min
+  !> maximal index of the send buffer
+  !integer, public         :: send_j_max
+
+  ! ===== Public procedures =====
+  !> Generic procedure to advect the scalar with a particle solver
+  public                  :: advecZ_calc
+  !----- (corrected) Remeshing methods (set to public for validation purposes) -----
+  public                  :: Zremesh_O2       ! order 2
+  public                  :: Zremesh_O4       ! order 4
+  ! ----- Other remeshing formulae -----
+  public                  :: Zremesh_Mprime6
+
+  ! ===== Private procedures =====
+  ! Particle solvers with different remeshing formulae
+  private                 :: advecZ_calc_O2       ! order 2 remeshing, line by line
+  private                 :: advecZ_calc_O2_group ! order 2 remeshing, group of lines
+  private                 :: advecZ_calc_Mprime6  ! M'6 remeshing method
+  ! Particle initialisation
+  private                 :: advecZ_init      ! generic initialisation of particle positions and velocities
+  private                 :: advecZ_init_line ! initialisation for a single line of particles
+  private                 :: advecZ_init_group! initialisation for a group of lines of particles
+
+  ! ===== Private variables =====
+  !> Size of the groups of particle lines along Z
+  integer, dimension(2), private  :: gpZ_size
+  !> Current direction = 3, i.e. along Z
+  integer, parameter, private     :: direction = 3
+
+  interface advecZ_init
+     module procedure advecZ_init_line, advecZ_init_group
+  end interface advecZ_init
+
+contains
+
+  ! #####################################################################################
+  ! #####                                                                           #####
+  ! #####                         Public procedure                                  #####
+  ! #####                                                                           #####
+  ! #####################################################################################
+
+  ! ====================================================================
+  ! ====================    Generic solver          ====================
+  ! ====================================================================
+
+  !> Scalar advection (this procedure calls the right solver, depending on the simulation setup)
+  !!    @param[in]        dt          = time step
+  !!    @param[in]        Vz          = velocity along Z (could be discretised on a bigger mesh than the scalar)
+  !!    @param[in,out]    SC          = scalar field to advect
+  !!    @param[in]        type_solver = scheme used for the advection (particle method of order 2 or 4, or M'6)
+  subroutine advecZ_calc(dt, Vz, SC, type_solver)
+
+    ! Input/Output
+    real(WP), intent(in)                                                :: dt
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vz
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: SC
+    character(len=*), intent(in)                                :: type_solver
+
+    ! Allocate send_group_min/max
+    if(allocated(send_group_min)) deallocate(send_group_min)
+    allocate(send_group_min(group_size(direction,1),group_size(direction,2)))
+    if(allocated(send_group_max)) deallocate(send_group_max)
+    allocate(send_group_max(group_size(direction,1),group_size(direction,2)))
+
+    ! Call the right solver depending on the requested space order.
+    select case(type_solver)
+    case('p_O2')
+       call advecZ_calc_O2_group(dt, Vz, SC,group_size(direction,:))
+       !call advecZ_calc_O2(dt, Vz, SC)
+    case('p_O4')
+       call advecZ_calc_O4(dt, Vz, SC,group_size(direction,:))
+    case('p_M6')
+       call advecZ_calc_Mprime6(dt, Vz, SC,group_size(direction,:))
+    case default
+       call advecZ_calc_O2_group(dt, Vz, SC,group_size(direction,:))
+       !call advecZ_calc_O2(dt, Vz, SC)
+    end select
+
+  end subroutine advecZ_calc
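+
+  ! Usage sketch (illustration only; it assumes the caller has already initialised the
+  ! Cartesian topology, the group sizes and the solver variables, and the names Vz and
+  ! scal are placeholders):
+  !
+  !   real(WP), dimension(N_proc(1),N_proc(2),N_proc(3)) :: Vz, scal
+  !   ! ... fill Vz and scal ...
+  !   call advecZ_calc(dt, Vz, scal, 'p_O2')   ! order 2 corrected remeshing
+  !
+  ! 'p_O4' and 'p_M6' select the order 4 and M'6 remeshing formulae; any other string
+  ! falls back to the order 2 group solver.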
+
+
+  ! #####################################################################################
+  ! #####                                                                           #####
+  ! #####                         Private procedure                                 #####
+  ! #####                                                                           #####
+  ! #####################################################################################
+
+  ! ====================================================================
+  ! ====================    Different solvers       ====================
+  ! ====================================================================
+
+
+  !> Advection during a time step dt - order 2, line by line
+  !!    @param[in]        dt      = time step
+  !!    @param[in]        Vz      = velocity along Z (could be discretised on a bigger mesh than the scalar)
+  !!    @param[in,out]    scal3D  = scalar field to advect
+  subroutine advecZ_calc_O2(dt,Vz,scal3D)
+
+    ! Input/Output
+    real(WP), intent(in)                                                :: dt
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vz
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal3D
+    ! Other local variables
+    integer                                 :: i,j          ! indices of the current mesh point
+    integer, dimension(2)                   :: ind_group    ! index of the current group of lines (=(i,j) by default)
+    real(WP), dimension(N_proc(direction))  :: p_pos_adim   ! dimensionless particle positions
+    real(WP), dimension(N_proc(direction))  :: p_V          ! particle velocities
+    logical, dimension(bl_nb(direction)+1)  :: bl_type      ! is the particle block a centered block or a left one?
+    logical, dimension(bl_nb(direction))    :: bl_tag       ! flags the tagged particle blocks
+
+    ind_group = 0
+    do j = 1, N_proc(2)
+       ind_group(2) = ind_group(2) + 1
+       ind_group(1) = 0
+       do i = 1, N_proc(1)
+          ind_group(1) = ind_group(1) + 1
+
+          ! ===== Init particles =====
+          call advecZ_init(Vz, i, j, p_pos_adim, p_V)
+
+          ! ===== Advection =====
+          ! -- Compute velocity (with a RK2 scheme) --
+          call AC_particle_velocity(dt, direction, ind_group, p_pos_adim, p_V)
+          ! -- Advec particles --
+          p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+
+          ! ===== Remeshing =====
+          ! -- Pre-Remeshing: Determine blocks type and tag particles --
+          call AC_type_and_block(dt, direction, ind_group, p_V, bl_type, bl_tag)
+          ! -- Remeshing --
+          call Zremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,i,j,scal3D)
+
+       end do
+    end do
+
+  end subroutine advecZ_calc_O2
+
+
+  !> Advection during a time step dt - order 2, group of lines
+  !!    @param[in]        dt      = time step
+  !!    @param[in]        Vz      = velocity along Z (could be discretised on a bigger mesh than the scalar)
+  !!    @param[in,out]    scal3D  = scalar field to advect
+  !!    @param[in]        gs      = size of the group of lines (along the X and Y directions)
+  subroutine advecZ_calc_O2_group(dt,Vz,scal3D,gs)
+
+    ! Input/Output
+    real(WP), intent(in)                                                :: dt
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vz
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal3D
+    integer, dimension(2), intent(in)                                   :: gs   ! size of the group of lines (along X and Y)
+    ! Other local variables
+    integer                                             :: i,j          ! indices of the current mesh point
+    integer, dimension(2)                               :: ind_group    ! index of the current group of lines (=(i,j) by default)
+    real(WP), dimension(N_proc(direction),gs(1),gs(2))  :: p_pos_adim   ! dimensionless particle positions
+    real(WP), dimension(N_proc(direction),gs(1),gs(2))  :: p_V          ! particle velocities
+    logical, dimension(bl_nb(direction)+1,gs(1),gs(2))  :: bl_type      ! is the particle block a centered block or a left one?
+    logical, dimension(bl_nb(direction),gs(1),gs(2))    :: bl_tag       ! flags the tagged particle blocks
+
+    ind_group = 0
+
+    do j = 1, N_proc(2), gs(2)
+       ind_group(2) = ind_group(2) + 1
+       ind_group(1) = 0
+       do i = 1, N_proc(1), gs(1)
+          ind_group(1) = ind_group(1) + 1
+
+          ! ===== Init particles =====
+          call advecZ_init_group(Vz, i, j, gs, p_pos_adim, p_V)
+
+          ! ===== Advection =====
+          ! -- Compute velocity (with a RK2 scheme) --
+          call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
+          ! -- Advec particles --
+          p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+
+          ! ===== Remeshing =====
+          ! -- Pre-Remeshing: Determine blocks type and tag particles --
+          call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
+          ! -- Remeshing --
+          call Zremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag, i,j,scal3D)
+
+       end do
+    end do
+
+
+  end subroutine advecZ_calc_O2_group
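+
+  ! Illustration only: the loops above handle one group of lines per iteration.
+  ! For instance, with N_proc(1) = 16 and gs(1) = 4, the i loop takes the values
+  ! 1, 5, 9 and 13, and each call to advecZ_init_group treats the lines i .. i+3
+  ! at once, so the velocity interpolation and remeshing communications are
+  ! mutualised over gs(1)*gs(2) lines instead of being issued line by line.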
+
+
+  !> Advection during a time step dt - order 4
+  !!    @param[in]        dt      = time step
+  !!    @param[in]        Vz      = velocity along Z (could be discretised on a bigger mesh than the scalar)
+  !!    @param[in,out]    scal3D  = scalar field to advect
+  !!    @param[in]        gs      = size of the group of lines (along the X and Y directions)
+  subroutine advecZ_calc_O4(dt,Vz,scal3D,gs)
+
+    ! Input/Output
+    real(WP), intent(in)                                                :: dt
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vz
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal3D
+    integer, dimension(2), intent(in)                                   :: gs   ! size of the group of lines (along X and Y)
+    ! Other local variables
+    integer                                             :: i,j          ! indices of the current mesh point
+    integer, dimension(2)                               :: ind_group    ! index of the current group of lines (=(i,j) by default)
+    real(WP), dimension(N_proc(direction),gs(1),gs(2))  :: p_pos_adim   ! dimensionless particle positions
+    real(WP), dimension(N_proc(direction),gs(1),gs(2))  :: p_V          ! particle velocities
+    logical, dimension(bl_nb(direction)+1,gs(1),gs(2))  :: bl_type      ! is the particle block a centered block or a left one?
+    logical, dimension(bl_nb(direction),gs(1),gs(2))    :: bl_tag       ! flags the tagged particle blocks
+
+    ind_group = 0
+
+    do j = 1, N_proc(2), gs(2)
+       ind_group(2) = ind_group(2) + 1
+       ind_group(1) = 0
+       do i = 1, N_proc(1), gs(1)
+          ind_group(1) = ind_group(1) + 1
+
+          ! ===== Init particles =====
+          call advecZ_init_group(Vz, i, j, gs, p_pos_adim, p_V)
+
+          ! ===== Advection =====
+          ! -- Compute velocity (with a RK2 scheme) --
+          call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
+          ! -- Advec particles --
+          p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+
+          ! ===== Remeshing =====
+          ! -- Pre-Remeshing: Determine blocks type and tag particles --
+          call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
+          ! -- Remeshing --
+          call Zremesh_O4(ind_group, gs, p_pos_adim, bl_type, bl_tag, i,j,scal3D)
+
+       end do
+    end do
+
+
+  end subroutine advecZ_calc_O4
+
+
+  !> Advection during a time step dt - M'6 remeshing formula - no tag, no type
+  !!    @param[in]        dt      = time step
+  !!    @param[in]        Vz      = velocity along Z (could be discretised on a bigger mesh than the scalar)
+  !!    @param[in,out]    scal3D  = scalar field to advect
+  !!    @param[in]        gs      = size of the group of lines (along the X and Y directions)
+  subroutine advecZ_calc_Mprime6(dt,Vz,scal3D,gs)
+
+    ! Input/Output
+    real(WP), intent(in)                                                :: dt
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vz
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal3D
+    integer, dimension(2), intent(in)                                   :: gs   ! size of the group of lines (along X and Y)
+    ! Other local variables
+    integer                                             :: i,j          ! indices of the current mesh point
+    integer, dimension(2)                               :: ind_group    ! index of the current group of lines (=(i,j) by default)
+    real(WP), dimension(N_proc(direction),gs(1),gs(2))  :: p_pos_adim   ! dimensionless particle positions
+    real(WP), dimension(N_proc(direction),gs(1),gs(2))  :: p_V          ! particle velocities
+
+    ind_group = 0
+
+    do j = 1, N_proc(2), gs(2)
+       ind_group(2) = ind_group(2) + 1
+       ind_group(1) = 0
+       do i = 1, N_proc(1), gs(1)
+          ind_group(1) = ind_group(1) + 1
+
+          ! ===== Init particles =====
+          call advecZ_init_group(Vz, i, j, gs, p_pos_adim, p_V)
+
+          ! ===== Advection =====
+          ! -- Compute velocity (with a RK2 scheme) --
+          call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
+          ! -- Advec particles --
+          p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+
+          ! ===== Remeshing =====
+          ! -- Remeshing --
+          call Zremesh_Mprime6(ind_group, gs, p_pos_adim, i,j,scal3D)
+
+       end do
+    end do
+
+
+  end subroutine advecZ_calc_Mprime6
+
+
+  ! ====================================================================
+  ! ====================   Remeshing subroutines    ====================
+  ! ====================================================================
+
+  !> Remeshing with an order 2 method, corrected to allow large CFL numbers - single-line version
+  !!    @param[in]        ind_group   = coordinates of the current group of lines
+  !!    @param[in]        p_pos_adim  = dimensionless particle positions
+  !!    @param[in]        bl_type     = .false. (resp. .true.) if the block is a left (resp. centered) one
+  !!    @param[in]        bl_tag      = contains information about the blocks (are they tagged?)
+  !!    @param[in]        i,j         = indices of the current line (x and y coordinates)
+  !!    @param[in,out]    scal        = scalar field to advect
+  subroutine Zremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,i,j,scal)
+
+    ! Input/Output
+    integer, dimension(2), intent(in)                                   :: ind_group
+    integer, intent(in)                                                 :: i, j
+    logical, dimension(:), intent(in)                                   :: bl_type
+    logical, dimension(:), intent(in)                                   :: bl_tag
+    real(WP), dimension(:), intent(in)                                  :: p_pos_adim
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal
+    ! Other local variables 
+    real(WP),dimension(:),allocatable   :: send_buffer  ! buffer used to remesh the scalar before sending it
+    ! to the right subdomain
+    integer, dimension(2)               :: rece_proc    ! minimal and maximal gap between my Z-coordinate and the ones from which
+    ! I will receive data
+    integer                             :: proc_min     ! smallest gap between me and the processes to which I send data
+    integer                             :: proc_max     ! largest gap between me and the processes to which I send data
+
+
+    !  -- Compute ranges for remeshing of local particles --
+    if (bl_type(1)) then
+       ! First particle is a centered one
+       send_j_min = nint(p_pos_adim(1))-1
+    else
+       ! First particle is a left one
+       send_j_min = floor(p_pos_adim(1))-1
+    end if
+    if (bl_type(N_proc(direction)/bl_size +1)) then
+       ! Last particle is a centered one
+       send_j_max = nint(p_pos_adim(N_proc(direction)))+1
+    else
+       ! Last particle is a left one
+       send_j_max = floor(p_pos_adim(N_proc(direction)))+1
+    end if
+
+    ! -- Determine the communications needed: who will communicate with whom? (i.e. compute senders and receivers) --
+    call AC_obtain_senders_line(send_j_min, send_j_max, direction, ind_group, proc_min, proc_max, rece_proc)
+
+    !  -- Allocate and initialize the buffer --
+    allocate(send_buffer(send_j_min:send_j_max))
+    send_buffer = 0.0;
+
+    ! -- Remesh the particles in the buffer --
+    call AC_remesh_lambda2corrected(direction, p_pos_adim, scal(i,j,:), bl_type, bl_tag, send_j_min, send_j_max, send_buffer)
+
+    ! -- Send the buffer to the matching processes and update the scalar field --
+    scal(i,j,:) = 0
+    call AC_bufferToScalar(direction, ind_group , send_j_min, send_j_max, proc_min, proc_max, rece_proc, send_buffer, scal(i,j,:))
+
+    ! Deallocate all field
+    deallocate(send_buffer)
+
+  end subroutine Zremesh_O2
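+
+  ! Worked example (illustration only): if the first particle belongs to a centered
+  ! block and sits at p_pos_adim(1) = 2.4, the range computation above gives
+  ! send_j_min = nint(2.4) - 1 = 1; if the last particle belongs to a left block and
+  ! sits at p_pos_adim(N_proc(direction)) = 34.7, then send_j_max = floor(34.7) + 1 = 35.
+  ! The order 2 formula remeshes each particle onto its neighbouring grid points,
+  ! hence the one-point margin kept on each side (the order 4 version below keeps two).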
+
+
+  !> Remeshing with an order 2 method, corrected to allow large CFL numbers - group version
+  !!    @param[in]        ind_group   = coordinates of the current group of lines
+  !!    @param[in]        gs          = size of the group of lines (along the X and Y directions)
+  !!    @param[in]        p_pos_adim  = dimensionless particle positions
+  !!    @param[in]        bl_type     = .false. (resp. .true.) if the block is a left (resp. centered) one
+  !!    @param[in]        bl_tag      = contains information about the blocks (are they tagged?)
+  !!    @param[in]        i,j         = indices of the first line of the group (x and y coordinates)
+  !!    @param[in,out]    scal        = scalar field to advect
+  subroutine Zremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag,i,j,scal)
+
+    ! Input/Output
+    integer, dimension(2), intent(in)                                   :: ind_group
+    integer, dimension(2), intent(in)                                   :: gs
+    integer, intent(in)                                                 :: i, j
+    real(WP), dimension(N_proc(direction),gs(1),gs(2)), intent(in)      :: p_pos_adim   ! dimensionless particle positions
+    logical, dimension(bl_nb(direction)+1,gs(1),gs(2)), intent(in)      :: bl_type      ! is the particle block a centered block or a left one?
+    logical, dimension(bl_nb(direction),gs(1),gs(2)), intent(in)        :: bl_tag       ! flags the tagged particle blocks
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal
+    ! Other local variables 
+    ! Variables used to remesh particles in a buffer
+    real(WP),dimension(:),allocatable   :: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain
+    integer, dimension(2,gs(1),gs(2))   :: rece_proc    ! minimal and maximal gap between my Z-coordinate and the ones from which
+    ! I will receive data
+    integer, dimension(gs(1),gs(2))     :: proc_min     ! smallest gap between me and the processes to which I send data
+    integer, dimension(gs(1),gs(2))     :: proc_max     ! largest gap between me and the processes to which I send data
+
+    integer                             :: i1, i2       ! indices of a line within the group
+
+    !  -- Compute ranges --
+    where (bl_type(1,:,:))
+       ! First particle is a centered one
+       send_group_min = nint(p_pos_adim(1,:,:))-1
+    elsewhere
+       ! First particle is a left one
+       send_group_min = floor(p_pos_adim(1,:,:))-1
+    end where
+    where (bl_type(N_proc(direction)/bl_size +1,:,:))
+       ! Last particle is a centered one
+       send_group_max = nint(p_pos_adim(N_proc(direction),:,:))+1
+    elsewhere
+       ! Last particle is a left one
+       send_group_max = floor(p_pos_adim(N_proc(direction),:,:))+1
+    end where
+
+    ! -- Determine the communications needed: who will communicate with whom? (i.e. compute senders and receivers) --
+    call AC_obtain_senders(direction, gs, ind_group, proc_min, proc_max, rece_proc)
+
+    do i2 = 1, gs(2)
+       do i1 = 1, gs(1)
+          send_j_min = send_group_min(i1,i2)
+          send_j_max = send_group_max(i1,i2)
+
+          ! -- Allocate buffer for remeshing of local particles --
+          allocate(send_buffer(send_j_min:send_j_max))
+          send_buffer = 0.0;
+
+          ! -- Remesh the particles in the buffer --
+          call AC_remesh_lambda2corrected(direction, p_pos_adim(:,i1,i2), scal(i+i1-1,j+i2-1,:), &
+               & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, send_j_max, send_buffer)
+
+          ! -- Send the buffer to the matching processes and update the scalar field --
+          scal(i+i1-1,j+i2-1,:) = 0
+          call AC_bufferToScalar(direction, ind_group , send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+               & rece_proc(:,i1,i2), send_buffer, scal(i+i1-1,j+i2-1,:))
+
+          ! Deallocate all field
+          deallocate(send_buffer)
+
+       end do
+    end do
+
+  end subroutine Zremesh_O2_group
+
+
+  !> Remeshing with an order 4 method, corrected to allow large CFL numbers - group version
+  !!    @param[in]        ind_group   = coordinates of the current group of lines
+  !!    @param[in]        gs          = size of the group of lines (along the X and Y directions)
+  !!    @param[in]        p_pos_adim  = dimensionless particle positions
+  !!    @param[in]        bl_tag      = contains information about the blocks (are they tagged?)
+  !!    @param[in]        i,j         = indices of the first line of the group (x and y coordinates)
+  !!    @param[in]        bl_type     = .false. (resp. .true.) if the block is a left (resp. centered) one
+  !!    @param[in,out]    scal        = scalar field to advect
+  subroutine Zremesh_O4(ind_group, gs, p_pos_adim, bl_type, bl_tag,i,j,scal)
+
+    ! Input/Output
+    integer, dimension(2), intent(in)                                   :: ind_group
+    integer, dimension(2), intent(in)                                   :: gs
+    integer, intent(in)                                                 :: i, j
+    real(WP), dimension(N_proc(direction),gs(1),gs(2)), intent(in)      :: p_pos_adim   ! dimensionless particle positions
+    logical, dimension(bl_nb(direction)+1,gs(1),gs(2)), intent(in)      :: bl_type      ! is the particle block a centered block or a left one?
+    logical, dimension(bl_nb(direction),gs(1),gs(2)), intent(in)        :: bl_tag       ! flags the tagged particle blocks
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal
+    ! Other local variables
+    ! Variables used to remesh particles ...
+    ! ... and to communicate between subdomains. A variable prefixed by "send_" (resp. "rece_")
+    ! designates something I send (resp. receive).
+    real(WP),dimension(:),allocatable   :: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain
+    integer, dimension(2,gs(1),gs(2))   :: rece_proc    ! minimal and maximal gap between my Z-coordinate and the ones from which
+    ! I will receive data
+    integer, dimension(gs(1),gs(2))     :: proc_min     ! smallest gap between me and the processes to which I send data
+    integer, dimension(gs(1),gs(2))     :: proc_max     ! largest gap between me and the processes to which I send data
+    integer                             :: i1, i2       ! indices of a line within the group
+
+    !  -- Compute ranges --
+    where (bl_type(1,:,:))
+       ! First particle is a centered one
+       send_group_min = nint(p_pos_adim(1,:,:))-2
+    elsewhere
+       ! First particle is a left one
+       send_group_min = floor(p_pos_adim(1,:,:))-2
+    end where
+    where (bl_type(N_proc(direction)/bl_size +1,:,:))
+       ! Last particle is a centered one
+       send_group_max = nint(p_pos_adim(N_proc(direction),:,:))+2
+    elsewhere
+       ! Last particle is a left one
+       send_group_max = floor(p_pos_adim(N_proc(direction),:,:))+2
+    end where
+
+    ! -- Determine the communications needed: who will communicate with whom? (i.e. compute senders and receivers) --
+    call AC_obtain_senders(direction, gs, ind_group, proc_min, proc_max, rece_proc)
+
+    do i2 = 1, gs(2)
+       do i1 = 1, gs(1)
+          send_j_min = send_group_min(i1,i2)
+          send_j_max = send_group_max(i1,i2)
+
+          ! -- Allocate buffer for remeshing of local particles --
+          allocate(send_buffer(send_j_min:send_j_max))
+          send_buffer = 0.0;
+
+          ! -- Remesh the particles in the buffer --
+          call AC_remesh_lambda4corrected(direction, p_pos_adim(:,i1,i2), scal(i+i1-1,j+i2-1,:), &
+               & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, send_j_max, send_buffer)
+
+          ! -- Send the buffer to the matching processes and update the scalar field --
+          scal(i+i1-1,j+i2-1,:) = 0
+          call AC_bufferToScalar(direction, ind_group , send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+               & rece_proc(:,i1,i2), send_buffer, scal(i+i1-1,j+i2-1,:))
+
+          ! Deallocate all field
+          deallocate(send_buffer)
+
+       end do
+    end do
+
+  end subroutine Zremesh_O4
+
+
+  !> Remeshing with the M'6 formula - no tag and no correction for large time steps.
+  !!    @param[in]        ind_group   = coordinates of the current group of lines
+  !!    @param[in]        gs          = size of the group of lines (along the X and Y directions)
+  !!    @param[in]        p_pos_adim  = dimensionless particle positions
+  !!    @param[in]        i,j         = indices of the first line of the group (x and y coordinates)
+  !!    @param[in,out]    scal        = scalar field to advect
+  subroutine Zremesh_Mprime6(ind_group, gs, p_pos_adim, i,j,scal)
+
+    ! input/output
+    integer, dimension(2), intent(in)                                   :: ind_group
+    integer, dimension(2), intent(in)                                   :: gs
+    integer, intent(in)                                                 :: i, j
+    real(WP), dimension(N_proc(direction),gs(1),gs(2)), intent(in)      :: p_pos_adim   ! dimensionless particle positions
+    real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal
+    ! Other local variables 
+    ! Variables used to remesh particles ...
+    ! ... and to communicate between subdomains. A variable prefixed by "send_" (resp. "rece_")
+    ! designates something I send (resp. receive).
+    real(WP),dimension(:),allocatable   :: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain
+    integer, dimension(2,gs(1),gs(2))   :: rece_proc    ! minimal and maximal gap between my Z-coordinate and the ones from which
+    ! I will receive data
+    integer, dimension(gs(1),gs(2))     :: proc_min     ! smallest gap between me and the processes to which I send data
+    integer, dimension(gs(1),gs(2))     :: proc_max     ! largest gap between me and the processes to which I send data
+    integer                             :: i1, i2       ! indices of a line within the group
+    integer                             :: ind_p        ! index of the current particle
+
+    !  -- Compute the remeshing domain --
+    send_group_min = floor(p_pos_adim(1,:,:)-2)
+    send_group_max = floor(p_pos_adim(N_proc(direction),:,:)+3)
+
+    ! -- Determine the communications needed: who will communicate with whom? (i.e. compute senders and receivers) --
+    call AC_obtain_senders(direction, gs, ind_group, proc_min, proc_max, rece_proc, 1)
+
+    do i2 = 1, gs(2)
+       do i1 = 1, gs(1)
+          send_j_min = send_group_min(i1,i2)
+          send_j_max = send_group_max(i1,i2)
+
+          !  -- Allocate and initialize the buffer --
+          allocate(send_buffer(send_j_min:send_j_max))
+          send_buffer = 0.0;
+
+          ! -- Remesh the particles in the buffer --
+          do ind_p = 1, N_proc(direction), 1
+             call AC_remesh_Mprime6(p_pos_adim(ind_p,i1,i2),scal(i+i1-1,j+i2-1, ind_p), send_buffer)
+          end do
+
+          ! -- Send the buffer to the matching processes and update the scalar field --
+          scal(i+i1-1,j+i2-1,:) = 0
+          call AC_bufferToScalar(direction, ind_group, send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+               & rece_proc(:,i1,i2), send_buffer, scal(i+i1-1,j+i2-1,:))
+
+          ! Deallocate all field
+          deallocate(send_buffer)
+
+       end do
+    end do
+
+  end subroutine Zremesh_Mprime6
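+
+  ! Illustration only: the M'6 kernel has a wider support than the order 2 formula,
+  ! hence the remeshing domain above runs from floor(first position - 2) to
+  ! floor(last position + 3). For example, a line whose particles stay between
+  ! positions 1.0 and 32.9 is remeshed into a buffer indexed from
+  ! floor(1.0 - 2) = -1 to floor(32.9 + 3) = 35.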
+
+
+  ! ====================================================================
+  ! ====================    Initialize particle     ====================
+  ! ====================================================================
+
+  !> Creation and initialisation of a particle line (i.e. the X and Y coordinates are fixed)
+  !!    @param[in]    Vz          = 3D velocity field
+  !!    @param[in]    i           = X-index of the current line
+  !!    @param[in]    j           = Y-index of the current line
+  !!    @param[out]   p_pos_adim  = dimensionless particle positions
+  !!    @param[out]   p_V         = particle velocities
+  subroutine advecZ_init_line(Vz, i, j, p_pos_adim, p_V)
+
+    ! Input/Output
+    integer, intent(in)                                 :: i,j
+    real(WP), dimension(N_proc(direction)), intent(out) :: p_pos_adim, p_V
+    real(WP), dimension(:,:,:), intent(in)              :: Vz
+    ! Other local variables
+    integer                                             :: ind  ! index
+
+    do ind = 1, N_proc(direction)
+       p_pos_adim(ind) = ind
+       p_V(ind)        = Vz(i,j,ind)
+    end do
+
+  end subroutine advecZ_init_line
+
+
+  !> Creation and initialisation of a group of particle lines
+  !!    @param[in]    Vz          = 3D velocity field
+  !!    @param[in]    i           = X-index of the first line of the group
+  !!    @param[in]    j           = Y-index of the first line of the group
+  !!    @param[in]    Gsize       = size of the group of lines (along the X and Y directions)
+  !!    @param[out]   p_pos_adim  = dimensionless particle positions
+  !!    @param[out]   p_V         = particle velocities
+  subroutine advecZ_init_group(Vz, i, j, Gsize, p_pos_adim, p_V)
+
+    ! Input/Output
+    integer, intent(in)                                                     :: i,j
+    integer, dimension(2), intent(in)                                       :: Gsize
+    real(WP), dimension(N_proc(direction),Gsize(1),Gsize(2)),intent(out)    :: p_pos_adim, p_V
+    real(WP), dimension(:,:,:), intent(in)                                  :: Vz
+    ! Other local variables
+    integer                                     :: ind          ! index
+    integer                                     :: i_gp, j_gp   ! X and Y indices of the current line in the group
+
+    do j_gp = 1, Gsize(2)
+       do i_gp = 1, Gsize(1)
+          do ind = 1, N_proc(direction)
+             p_pos_adim(ind, i_gp, j_gp) = ind
+             p_V(ind, i_gp, j_gp)        = Vz(i+(i_gp-1),j+(j_gp-1), ind)
+          end do
+       end do
+    end do
+
+  end subroutine advecZ_init_group
+
+end module advecZ
+!> @}
diff --git a/HySoP/src/scalesInterface/particles/advec_common.f90 b/HySoP/src/scalesInterface/particles/advec_common.f90
new file mode 100644
index 000000000..48433ced4
--- /dev/null
+++ b/HySoP/src/scalesInterface/particles/advec_common.f90
@@ -0,0 +1,2292 @@
+!> @addtogroup part
+!! @{
+!------------------------------------------------------------------------------
+!
+! MODULE: advec_common
+!
+!
+! DESCRIPTION:
+!> The module "advec_common" gathers the functions and subroutines used to advect a
+!! scalar which are not specific to a direction.
+!! @details
+!! This module gathers functions and routines used to advect a scalar which are not
+!! specific to a direction. This is a parallel implementation using MPI and
+!! the Cartesian topology it provides. It also contains the variables common to
+!! the solver along each direction and other generic variables used for the
+!! advection based on the particle method.
+!!
+!! Except for testing purposes, this module is not supposed to be used by the
+!! main code but only by the other advection modules. More precisely, an end user
+!! must only use the generic "advec" module, which contains all the interfaces needed
+!! to solve the advection equation with the particle method and to choose the
+!! remeshing formula, the dimensional splitting and everything else.
+!!
+!! The module "test_advec" can be used in order to validate the procedures
+!! embedded in this module.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advec_common
+
+  use precision
+  use cart_topology
+  use advec_variables!, only : real_pter
+  use advec_common_line
+
+  implicit none
+
+  ! XXX If we switch to Fortran 2003: move all these variables into the module
+  ! advec (file advec.F90) and declare them as protected. Only the procedure
+  ! "advec_init" should be able to modify them, but many procedures need to
+  ! access them.
+
+
+  ! Information about the particles and their blocks
+  public
+
+
+  ! ===== Public procedures =====
+  !----- Determine block type and tag particles -----
+  public                              :: AC_type_and_block
+  public                              :: AC_type_and_block_group
+  !----- To interpolate velocity -----
+  public                              :: AC_obtain_receivers
+  public                              :: AC_particle_velocity
+  private                             :: AC_velocity_interpol_group
+  !----- To remesh particles -----
+  public                              :: AC_obtain_senders
+  private                             :: AC_obtain_senders_group
+  private                             :: AC_obtain_senders_com
+  public                              :: AC_bufferToScalar
+  
+  interface AC_particle_velocity
+     module procedure AC_particle_velocity_line, AC_velocity_interpol_group
+  end interface AC_particle_velocity
+
+  interface AC_type_and_block
+     module procedure  AC_type_and_block_line, AC_type_and_block_group
+  end interface AC_type_and_block
+
+  !> Determine the set of processes which will send me information during the
+  !!  scalar remeshing
+  interface AC_obtain_senders
+     module procedure AC_obtain_senders_line, AC_obtain_senders_com, &
+          & AC_obtain_senders_group
+  end interface AC_obtain_senders
+  
+  interface AC_bufferToScalar
+     module procedure AC_bufferToScalar_line
+  end interface AC_bufferToScalar
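+
+  ! Note (illustration only): these generic names are resolved from the argument lists.
+  ! For instance, the group remeshing routines in this patch call
+  !   call AC_obtain_senders(direction, gs, ind_group, proc_min, proc_max, rece_proc)
+  ! (possibly with an extra trailing argument), and the compiler picks the matching
+  ! specific procedure (line, group or "com" version) from the interface block above.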
+
+
+contains
+
+  ! ===== Public procedure =====
+
+
+  ! ==================================================================================
+  ! ====================     Compute particle velocity (RK2)      ====================
+  ! ==================================================================================
+
+  !> Interpolate the velocity field used in a RK2 scheme for particle advection.
+  !!    @param[in]        dt          = time step
+  !!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+  !!    @param[in]        ind_group   = coordinates of the current group of lines
+  !!    @param[in]        p_pos_adim  = dimensionless particle positions
+  !!    @param[in,out]    p_V         = particle velocities (along the current direction)
+  !! @details
+  !!    An RK2 scheme is used to advect the particles: the middle point scheme. An
+  !!    intermediate position "p_pos_bis(i) = p_pos(i) + V(i)*dt/2" is computed and then
+  !!    the numerical velocity of each particle is computed as the interpolation of V at
+  !!    this point. This field is then used to advect the particles at second order in time:
+  !!    p_pos(t+dt, i) = p_pos(i) + dt*p_V(i).
+  !!    The group line index is used to guarantee the unicity of each MPI message tag.
+  subroutine AC_particle_velocity_line(dt, direction, ind_group, p_pos_adim, p_V)
+
+    ! This code involves a copy of p_V. It would be possible to use the 3D velocity field directly, but even
+    ! then a memory copy is still needed to send the velocity field to other processes: MPI sends contiguous memory values.
+
+    ! Input/Output
+    real(WP), intent(in)                            :: dt       ! time step
+    integer, intent(in)                             :: direction
+    integer, dimension(2), intent(in)               :: ind_group
+    real(WP), dimension(:), intent(in)              :: p_pos_adim
+    real(WP), dimension(:), intent(inout)           :: p_V
+    ! Others, local
+    real(WP), dimension(N_proc(direction))          :: p_pos_bis    ! dimensionless position of the middle point
+    real(WP), dimension(N_proc(direction)), target  :: p_V_bis      ! velocity of the middle point
+    real(WP), dimension(N_proc(direction))          :: weight       ! interpolation weight
+    type(real_pter), dimension(N_proc(direction))   :: Vp, Vm       ! Velocity on previous and next mesh point
+    real(WP), dimension(:), allocatable, target     :: V_buffer     ! Velocity buffer for positions outside of the local subdomain
+    integer                                         :: size_buffer  ! buffer size
+    integer                                         :: rece_ind_min ! minimal index used in the velocity interpolation
+    integer                                         :: rece_ind_max ! maximal index used in the velocity interpolation
+    integer                                         :: ind, ind_com ! indices
+    integer                                         :: pos, pos_old ! indices of the mesh point which precedes the particle position
+    integer                                         :: proc_gap, gap! distance between my (mpi) coordinate and the coordinate of the
+    ! process associated with a given position
+    integer, dimension(:), allocatable              :: rece_rank    ! ranks of the processes which send me information
+    integer                                         :: send_rank    ! rank of the process to which I send information
+    integer                                         :: rankP        ! rank of the process ("source rank" returned by mpi_cart_shift)
+    integer, dimension(2)                           :: rece_range   ! range of the velocity fields I want to receive
+    integer, dimension(2)                           :: send_range   ! range of the velocity fields I send
+    integer, dimension(2)                           :: rece_gap     ! distance between me and the processes which send me information
+    integer, dimension(2)                           :: send_gap     ! distance between me and the processes to which I send information
+    integer                                         :: msg_size     ! size of the sent/received messages
+    integer                                         :: tag          ! mpi message tag
+    integer                                         :: ierr         ! mpi error code
+    integer, dimension(:), allocatable              :: s_request    ! mpi communication request (handle) of nonblocking send
+    integer, dimension(:), allocatable              :: s_request_bis! mpi communication request (handle) of nonblocking send
+    integer, dimension(:), allocatable              :: rece_request ! mpi communication request (handle) of nonblocking receive
+    integer, dimension(MPI_STATUS_SIZE)             :: rece_status  ! mpi status (for mpi_wait)
+
+    ! -- Initialisation --
+    ind_com = 0
+    do ind = 1, N_proc(direction)
+       nullify(Vp(ind)%pter)
+       nullify(Vm(ind)%pter)
+    end do
+    ! Compute the middle point
+    p_pos_bis = p_pos_adim + (dt/2.0)*p_V/d_sc(direction)
+    p_V_bis = p_V
+    ! Compute range of the set of point where I need the velocity value
+    rece_ind_min = floor(p_pos_bis(1))
+    rece_ind_max = floor(p_pos_bis(N_proc(direction))) + 1
+    ! Allocate the buffer
+    ! If rece_ind_min and rece_ind_max are not in [1;N_proc(direction)] then it changes the number of communications
+    ! size_buffer = max(temp - N_proc(direction), 0) - min(0, temp)
+    !size_buffer = - max(temp - N_proc(direction), 0) - min(0, temp)
+    ! This should work, but for a first test we prefer to compute size_buffer more simply
+    size_buffer = 0
+
+    ! -- Exchange non-blocking messages so that the computations can overlap the
+    ! communication process --
+    call AC_obtain_receivers(direction, ind_group, rece_ind_min, rece_ind_max, send_gap, rece_gap)
+    allocate(rece_rank(rece_gap(1):rece_gap(2)))
+    ! Send messages about what I want
+    allocate(s_request_bis(rece_gap(1):rece_gap(2)))
+    do proc_gap = rece_gap(1), rece_gap(2)
+       call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rece_rank(proc_gap), ierr)
+       if (rece_rank(proc_gap) /= D_rank(direction)) then
+          ! Range I want
+          gap = proc_gap*N_proc(direction)
+          rece_range(1) = max(rece_ind_min, gap+1) ! +1 because Fortran indices start from 1
+          rece_range(2) = min(rece_ind_max, gap+N_proc(direction)) 
+          ! Tag = concatenation of (rank+1), ind_group(1), ind_group(2), direction and a unique Id.
+          tag = compute_tag(ind_group, tag_velo_range, direction, proc_gap)
+          ! Send message
+          size_buffer = size_buffer + (rece_range(2)-rece_range(1)) + 1
+          call mpi_Isend(rece_range(1), 2, MPI_INTEGER, rece_rank(proc_gap), tag, D_comm(direction), s_request_bis(proc_gap),ierr)
+       end if
+    end do
+    allocate(V_buffer(max(size_buffer,1)))
+    V_buffer = 0
+    ! Send the velocity field to the processes which need it
+    allocate(s_request(send_gap(1):send_gap(2)))
+    do proc_gap = send_gap(1), send_gap(2)
+       call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank, ierr)
+       if (send_rank /= D_rank(direction)) then
+          ! I - Receive messages about what I have to send
+          ! Ia - Compute reception tag = concatenation of (rank+1), ind_group(1), ind_group(2), direction and a unique Id.
+          tag = compute_tag(ind_group, tag_velo_range, direction, -proc_gap)
+          ! Ib - Receive the message
+          call mpi_recv(send_range(1), 2, MPI_INTEGER, send_rank, tag, D_comm(direction), rece_status, ierr)
+          send_range = send_range + proc_gap*N_proc(direction)
+          ! II - Send it
+          ! IIa - Compute send tag
+          tag = compute_tag(ind_group, tag_velo_V, direction, proc_gap)
+          ! IIb - Send message
+          call mpi_Isend(p_V(send_range(1)), send_range(2)-send_range(1)+1, MPI_DOUBLE_PRECISION, &
+               & send_rank, tag, D_comm(direction), s_request(proc_gap), ierr)
+       end if
+    end do
+
+    ! Non blocking reception of the velocity field
+    ind = 1
+    allocate(rece_request(rece_gap(1):rece_gap(2)))
+    do proc_gap = rece_gap(1), rece_gap(2)
+       if (rece_rank(proc_gap) /= D_rank(direction)) then
+          ! IIa - Compute reception tag
+          tag = compute_tag(ind_group, tag_velo_V, direction, -proc_gap)
+          ! IIb - Receive message
+          gap = proc_gap*N_proc(direction)
+          rece_range(1) = max(rece_ind_min, gap+1) ! +1 because Fortran indices start from 1
+          rece_range(2) = min(rece_ind_max, gap+N_proc(direction)) 
+          msg_size = rece_range(2)-rece_range(1)+1
+          call mpi_Irecv(V_buffer(ind), msg_size, MPI_DOUBLE_PRECISION, rece_rank(proc_gap), tag, D_comm(direction), &
+               & rece_request(proc_gap), ierr) 
+          ind = ind + msg_size
+       end if
+    end do
+
+    ! -- Prepare the velocity interpolation --
+    ! Compute the interpolation weights and update the pointers Vp and Vm
+    ! Initialisation of the recurrence process
+    ind = 1
+    pos = floor(p_pos_bis(ind))
+    weight(ind) = p_pos_bis(ind)-pos
+    ! Vm = V(pos)
+    proc_gap = floor(real(pos-1)/N_proc(direction)) 
+    call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank, ierr)
+    if (send_rank == D_rank(direction)) then
+       Vm(ind)%pter => p_V_bis(pos-proc_gap*N_proc(direction))
+    else 
+       ind_com = ind_com + 1
+       Vm(ind)%pter => V_buffer(ind_com)
+    end if
+    ! Vp = V(pos+1)
+    proc_gap = floor(real(pos+1-1)/N_proc(direction)) 
+    call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank, ierr)
+    if (send_rank == D_rank(direction)) then
+       Vp(ind)%pter => p_V_bis(pos+1-proc_gap*N_proc(direction))
+    else 
+       ind_com = ind_com + 1
+       Vp(ind)%pter => V_buffer(ind_com)
+    end if
+    pos_old = pos
+
+    ! Following indices: reuse the work already done
+    do ind = 2, N_proc(direction)
+       pos = floor(p_pos_bis(ind))
+       weight(ind) = p_pos_bis(ind)-pos
+       select case(pos-pos_old)
+       case(0)
+          ! The particle belongs to the same segment as the previous one
+          Vm(ind)%pter => Vm(ind-1)%pter
+          Vp(ind)%pter => Vp(ind-1)%pter
+       case(1)
+          ! The particle follows the previous one
+          Vm(ind)%pter => Vp(ind-1)%pter
+          ! Vp = V(pos+1)
+          proc_gap = floor(real(pos+1-1)/N_proc(direction)) ! fortran -> indice starts from 1
+          call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank, ierr)
+          if (send_rank == D_rank(direction)) then
+             Vp(ind)%pter => p_V_bis(pos+1-proc_gap*N_proc(direction))
+          else 
+             ind_com = ind_com + 1
+             Vp(ind)%pter => V_buffer(ind_com)
+          end if
+       case(2)
+          ! pos = pos_old + 2, which corresponds to an "extension"
+          ! Vm = V(pos)
+          proc_gap = floor(real(pos-1)/N_proc(direction)) 
+          call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank, ierr)
+          if (send_rank == D_rank(direction)) then
+             Vm(ind)%pter => p_V_bis(pos-proc_gap*N_proc(direction))
+          else 
+             ind_com = ind_com + 1
+             Vm(ind)%pter => V_buffer(ind_com)
+          end if
+          ! Vp = V(pos+1)
+          proc_gap = floor(real(pos+1-1)/N_proc(direction)) 
+          call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank, ierr)
+          if (send_rank == D_rank(direction)) then
+             Vp(ind)%pter => p_V_bis(pos+1-proc_gap*N_proc(direction))
+          else 
+             ind_com = ind_com + 1
+             Vp(ind)%pter => V_buffer(ind_com)
+          end if
+       case default
+          print*, "unexpected case : pos = ", pos, " , pos_old = ", pos_old, " ind = ", ind
+       end select
+       pos_old = pos
+    end do
+
+    ! -- Compute the interpolated velocity --
+    ! Check that the communications are completed
+    do proc_gap = rece_gap(1), rece_gap(2)
+       if (rece_rank(proc_gap)/=D_rank(direction)) then
+          call mpi_wait(rece_request(proc_gap), rece_status, ierr)
+       end if
+    end do
+
+
+    ! Then compute the field
+    do ind = 1, N_proc(direction)
+       p_V(ind) = weight(ind)*Vp(ind)%pter + (1-weight(ind))*Vm(ind)%pter
+    end do
+
+    do ind = 1, N_proc(direction)
+       nullify(Vp(ind)%pter)
+       nullify(Vm(ind)%pter)
+    end do
+
+    do proc_gap = send_gap(1), send_gap(2)
+       call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank, ierr)
+       if (send_rank /= D_rank(direction)) then
+          call MPI_WAIT(s_request(proc_gap),rece_status,ierr)
+       end if
+    end do
+    deallocate(s_request)
+    do proc_gap = rece_gap(1), rece_gap(2)
+       if (rece_rank(proc_gap) /= D_rank(direction)) then
+          call MPI_WAIT(s_request_bis(proc_gap),rece_status,ierr)
+       end if
+    end do
+    deallocate(s_request_bis)
+
+    ! Deallocation    
+    deallocate(rece_rank)
+    deallocate(rece_request)
+    deallocate(V_buffer)
+
+  end subroutine AC_particle_velocity_line
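+
+  ! RK2 sketch (illustration of the scheme implemented above, in dimensionless mesh units):
+  !
+  !   p_pos_bis = p_pos_adim + (dt/2) * p_V / d_sc(direction)   ! middle point
+  !   p_V       = velocity linearly interpolated at p_pos_bis
+  !   ! the caller (advecX/Y/Z solver) then performs the advection step:
+  !   p_pos_adim = p_pos_adim + dt * p_V / d_sc(direction)
+  !
+  ! The linear interpolation uses the two surrounding mesh values Vm and Vp with the
+  ! weight w = p_pos_bis - floor(p_pos_bis):  p_V = w*Vp + (1-w)*Vm.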
+
+
+  !> Interpolate the velocity field used in a RK2 scheme for particle advection -
+  !! version for a group of (more than one) lines
+  !!    @param[in]        dt          = time step
+  !!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+  !!    @param[in]        gs          = size of a group (i.e. number of lines it gathers along the two other directions)
+  !!    @param[in]        ind_group   = coordinates of the current group of lines
+  !!    @param[in]        p_pos_adim  = dimensionless particle positions
+  !!    @param[in,out]    p_V         = particle velocities (along the current direction)
+  !! @details
+  !!    An RK2 scheme is used to advect the particles: the middle point scheme. An
+  !!    intermediate position "p_pos_bis(i) = p_pos(i) + V(i)*dt/2" is computed and then
+  !!    the numerical velocity of each particle is computed as the interpolation of V at
+  !!    this point. This field is then used to advect the particles at second order in time:
+  !!    p_pos(t+dt, i) = p_pos(i) + dt*p_V(i).
+  !!    The group line index is used to guarantee the unicity of each MPI message tag.
+  !!    The interpolation is done for a whole group of lines, which allows the
+  !!    communications to be mutualised. Considering a group of Na x Nb lines, the messages
+  !!    exchanged by this algorithm are around (Na x Nb) times bigger than those of the
+  !!    single-line algorithm, but also around (Na x Nb) times less frequent.
+  subroutine AC_velocity_interpol_group(dt, direction, gs, ind_group, p_pos_adim, p_V)
+
+    ! This code involves a copy of p_V. It would be possible to use the 3D velocity field directly, but even
+    ! then a memory copy is still needed to send the velocity field to other processes: MPI sends contiguous memory values.
+
+    ! Input/Output
+    real(WP), intent(in)                            :: dt           ! time step
+    integer, intent(in)                             :: direction    ! current direction
+    integer, dimension(2),intent(in)                :: gs           ! group size
+    integer, dimension(2), intent(in)               :: ind_group
+    real(WP), dimension(:,:,:), intent(in)          :: p_pos_adim
+    real(WP), dimension(:,:,:), intent(inout)       :: p_V
+    ! Others, local
+    real(WP),dimension(N_proc(direction),gs(1),gs(2))           :: p_pos_bis    ! dimensionless position of the middle point
+    real(WP), dimension(N_proc(direction),gs(1),gs(2)),target   :: p_V_bis      ! velocity of the middle point
+    real(WP), dimension(N_proc(direction),gs(1),gs(2))          :: weight       ! interpolation weight
+    type(real_pter),dimension(N_proc(direction),gs(1),gs(2))    :: Vp, Vm       ! Velocity on previous and next mesh point
+    real(WP), dimension(:), allocatable, target                 :: V_buffer     ! Velocity buffer for positions outside of the local subdomain
+    integer, dimension(:), allocatable                          :: pos_in_buffer! buffer size
+    integer , dimension(gs(1), gs(2))           :: rece_ind_min ! minimal index of the mesh points used in the velocity interpolation (on my local subdomain)
+    integer , dimension(gs(1), gs(2))           :: rece_ind_max ! maximal index of the mesh points used in the velocity interpolation (on my local subdomain)
+    integer                                     :: ind, ind_com ! indices
+    integer                                     :: i1, i2       ! indices in the lines group
+    integer                                     :: pos, pos_old ! indices of the mesh point which precedes the particle position
+    integer                                     :: proc_gap, gap! distance between my (mpi) coordinate and the coordinate of the
+    ! process associated with a given position
+    integer, dimension(:), allocatable          :: rece_rank    ! ranks of the processes which send me information
+    integer, dimension(:), allocatable          :: send_rank    ! ranks of the processes to which I send information
+    integer                                     :: rankP        ! rank of the process ("source rank" returned by mpi_cart_shift)
+    integer, dimension(:), allocatable          :: send_carto   ! cartography of what I have to send
+    integer                                     :: ind_1Dtable  ! index of my current position inside a one-dimensional table
+    integer                                     :: ind_for_i1   ! where to read the first coordinate (i1) of the current line inside the cartography ?
+    real(WP), dimension(:), allocatable         :: send_buffer  ! to store what I have to send (on a contiguous way)
+    integer, dimension(gs(1),gs(2),2)           :: rece_gap     ! distance between me and the processes which send me information
+    integer, dimension(2 , 2)                   :: send_gap     ! distance between me and the processes to which I send information
+    integer, dimension(2)                       :: rece_gap_abs ! min (resp max) value of rece_gap(:,:,i) with i=1 (resp 2)
+    integer                                     :: com_size     ! size of message send/receive
+    integer                                     :: min_size     ! minimal size of cartography(:,proc_gap)
+    integer                                     :: max_size     ! maximal size of cartography(:,proc_gap)
+    integer                                     :: tag          ! mpi message tag
+    integer                                     :: ierr         ! mpi error code
+    integer, dimension(:), allocatable          :: s_request    ! mpi communication request (handle) of nonblocking send
+    integer, dimension(:), allocatable          :: s_request_bis! mpi communication request (handle) of nonblocking send
+    integer, dimension(:), allocatable          :: rece_request ! mpi communication request (handle) of nonblocking receive
+    integer, dimension(MPI_STATUS_SIZE)         :: rece_status  ! mpi status (for mpi_wait)
+    integer, dimension(:,:), allocatable        :: cartography  ! cartography(:,proc_gap) contains the set of line indices in the block for which the
+    ! current process requires data from proc_gap and, for each of these lines, the range
+    ! of mesh points from which it requires the velocity values.
+
+    ! -- Initialisation --
+    do i2 = 1, gs(2) 
+       do i1 = 1, gs(1) 
+          do ind = 1, N_proc(direction)
+             nullify(Vp(ind,i1,i2)%pter)
+             nullify(Vm(ind,i1,i2)%pter)
+          end do
+       end do
+    end do
+    ! Compute the middle point
+    p_pos_bis = p_pos_adim + (dt/2.0)*p_V/d_sc(direction)
+    p_V_bis = p_V
+    ! XXX Todo / Optim
+    ! The velocity is copied here. This copy is required because the interpolated velocity
+    ! is computed from "itself": the computed value depends on its previous value at points
+    ! that may differ, so overwriting it on the fly would give wrong results.
+    ! We could use the input velocity field V directly to produce p_V as output, but this is
+    ! awkward because, depending on the direction, the order of the indices i, i1 and i2
+    ! would have to be changed, which would make the code quite messy.
+    ! XXX
+    ! Compute range of the set of point where I need the velocity value
+    rece_ind_min = floor(p_pos_bis(1,:,:))
+    rece_ind_max = floor(p_pos_bis(N_proc(direction),:,:)) + 1
+
+    ! ===== Exchange the velocity field if needed =====
+    ! Non-blocking messages are used so that the computations can overlap the communication process
+    ! -- What do I have to communicate? --
+    rece_gap(:,:,1) = floor(real(rece_ind_min-1)/N_proc(direction))
+    rece_gap(:,:,2) = floor(real(rece_ind_max-1)/N_proc(direction))
+    rece_gap_abs(1) = minval(rece_gap(:,:,1))
+    rece_gap_abs(2) = maxval(rece_gap(:,:,2))
+    max_size = 2 + gs(2)*(2+3*gs(1))
+    allocate(cartography(max_size,rece_gap_abs(1):rece_gap_abs(2)))
+    allocate(rece_rank(rece_gap_abs(1):rece_gap_abs(2)))
+    call AC_velocity_determine_communication(direction, ind_group, gs, send_gap,  &
+         & rece_gap, rece_gap_abs, rece_rank, cartography)
+
+    ! -- Send messages about what I want --
+    allocate(s_request_bis(rece_gap_abs(1):rece_gap_abs(2)))
+    min_size = 2 + gs(2)
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+       if (rece_rank(proc_gap) /= D_rank(direction)) then
+          cartography(1,proc_gap) = 0
+          ! Use the cartography to know which lines are concerned
+          com_size = cartography(2,proc_gap)
+          ! Range I want - store into the cartography
+          gap = proc_gap*N_proc(direction)
+          ! Position in cartography(:,proc_gap) of the current i1 indice
+          ind_for_i1 = min_size
+          do i2 = 1, gs(2)
+             do ind = ind_for_i1+1, ind_for_i1 + cartography(2+i2,proc_gap), 2
+                do i1 = cartography(ind,proc_gap), cartography(ind+1,proc_gap)
+                   ! The interval starts at:
+                   cartography(com_size+1,proc_gap) = max(rece_ind_min(i1,i2), gap+1) ! Fortran => indices start from 1
+                   ! and ends at:
+                   cartography(com_size+2,proc_gap) = min(rece_ind_max(i1,i2), gap+N_proc(direction))
+                   ! update the number of elements to receive
+                   cartography(1,proc_gap) = cartography(1,proc_gap) &
+                        & + cartography(com_size+2,proc_gap) &
+                        & - cartography(com_size+1,proc_gap) + 1
+                   com_size = com_size+2
+                end do
+             end do
+             ind_for_i1 = ind_for_i1 + cartography(2+i2,proc_gap)
+          end do
+          ! Tag = concatenation of (rank+1), ind_group(1), ind_group(2), direction and a unique Id.
+          tag = compute_tag(ind_group, tag_bufToScal_range, direction, proc_gap)
+          ! Send message
+          call mpi_Isend(cartography(1,proc_gap), com_size, MPI_INTEGER, rece_rank(proc_gap), tag, &
+               D_comm(direction), s_request_bis(proc_gap),ierr)
+       end if
+    end do
+
+    ! -- Send the velocity field to the processes which need it --
+    allocate(s_request(send_gap(1,1):send_gap(1,2)))
+    allocate(send_rank(send_gap(1,1):send_gap(1,2)))
+    allocate(send_carto(max_size))
+    do proc_gap = send_gap(1,1), send_gap(1,2)
+       call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank(proc_gap), ierr)
+       if (send_rank(proc_gap) /= D_rank(direction)) then
+          ! I - Receive messages about what I have to send
+          ! Ia - Compute reception tag = concatenation of (rank+1), ind_group(1), ind_group(2), direction and a unique Id.
+          tag = compute_tag(ind_group, tag_velo_range, direction, -proc_gap)
+          ! Ib - Receive the message
+          call mpi_recv(send_carto(1), max_size, MPI_INTEGER, send_rank(proc_gap), tag, D_comm(direction), rece_status, ierr)
+          ! II - Send it
+          ! IIa - Create send buffer
+          allocate(send_buffer(send_carto(1)))
+          gap = proc_gap*N_proc(direction)
+          com_size = 0
+          ind_1Dtable = send_carto(2)
+          ! Position in cartography(:,proc_gap) of the current i1 index
+          ind_for_i1 = min_size
+          do i2 = 1, gs(2)
+             do ind = ind_for_i1+1, ind_for_i1 + send_carto(2+i2), 2
+                do i1 = send_carto(ind), send_carto(ind+1)
+                   do ind_com = send_carto(ind_1Dtable+1)+gap, send_carto(ind_1Dtable+2)+gap ! indices inside the current line
+                      com_size = com_size + 1
+                      send_buffer(com_size) = p_V(ind_com, i1,i2)
+                   end do
+                   ind_1Dtable = ind_1Dtable + 2
+                end do
+             end do
+             ind_for_i1 = ind_for_i1 + send_carto(2+i2)
+          end do
+          ! IIb - Compute send tag
+          tag = compute_tag(ind_group, tag_velo_V, direction, proc_gap)
+          ! IIc - Send message
+          call mpi_Isend(send_buffer(1), com_size, MPI_DOUBLE_PRECISION, &
+               & send_rank(proc_gap), tag, D_comm(direction), s_request(proc_gap), ierr)
+          deallocate(send_buffer)
+       end if
+    end do
+    deallocate(send_carto)
+
+    ! -- Non-blocking reception of the velocity field --
+    ! Allocate pos_in_buffer to compute the size of V_buffer and then be able to
+    ! allocate it.
+    allocate(pos_in_buffer(rece_gap_abs(1):rece_gap_abs(2)))
+    pos_in_buffer(rece_gap_abs(1)) = 1
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)-1
+       pos_in_buffer(proc_gap+1)= pos_in_buffer(proc_gap) + cartography(1,proc_gap)
+    end do
+    allocate(V_buffer(pos_in_buffer(rece_gap_abs(2)) &
+         & + cartography(1,rece_gap_abs(2))))
+    V_buffer = 0
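+    ! Illustrative example (assumed values): with three sending gaps and
+    ! cartography(1,:) = (/5, 0, 3/), the prefix sum above gives pos_in_buffer = (/1, 6, 6/),
+    ! so the data of each remote process lands in its own contiguous slice of V_buffer.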
+    allocate(rece_request(rece_gap_abs(1):rece_gap_abs(2)))
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+       if (rece_rank(proc_gap) /= D_rank(direction)) then
+          ! IIa - Compute reception tag
+          tag = compute_tag(ind_group, tag_velo_V, direction, -proc_gap)
+          ! IIb - Receive message
+          call mpi_Irecv(V_buffer(pos_in_buffer(proc_gap)), cartography(1,proc_gap), MPI_DOUBLE_PRECISION, &
+               & rece_rank(proc_gap), tag, D_comm(direction), rece_request(proc_gap), ierr) 
+       end if
+    end do
+    deallocate(cartography) ! We do not need it anymore
+
+    ! ===== Compute the interpolated velocity =====
+    ! -- Compute the interpolation weight and update the pointers Vp and Vm --
+    do i2 = 1, gs(2)
+       do i1 = 1, gs(1)
+          ! Initialisation of the recurrence process
+          ind = 1
+          pos = floor(p_pos_bis(ind,i1,i2))
+          weight(ind,i1,i2) = p_pos_bis(ind,i1,i2)-pos
+          ! Vm = V(pos)
+          proc_gap = floor(real(pos-1)/N_proc(direction)) 
+          if (rece_rank(proc_gap) == D_rank(direction)) then
+             Vm(ind,i1,i2)%pter => p_V_bis(pos-proc_gap*N_proc(direction), i1,i2)
+          else 
+             Vm(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap))
+             pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
+          end if
+          ! Vp = V(pos+1)
+          proc_gap = floor(real(pos+1-1)/N_proc(direction)) 
+          if (rece_rank(proc_gap) == D_rank(direction)) then
+             Vp(ind,i1,i2)%pter => p_V_bis(pos+1-proc_gap*N_proc(direction), i1,i2)
+          else 
+             Vp(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap))
+             pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
+          end if
+          pos_old = pos
+
+          ! Following indices: reuse the work already done
+          do ind = 2, N_proc(direction)
+             pos = floor(p_pos_bis(ind,i1,i2))
+             weight(ind,i1,i2) = p_pos_bis(ind,i1,i2)-pos
+             select case(pos-pos_old)
+             case(0)
+                ! The particle belongs to the same segment as the previous one
+                Vm(ind,i1,i2)%pter => Vm(ind-1,i1,i2)%pter
+                Vp(ind,i1,i2)%pter => Vp(ind-1,i1,i2)%pter
+             case(1)
+                ! The particle follows the previous one
+                Vm(ind,i1,i2)%pter => Vp(ind-1,i1,i2)%pter
+                ! Vp = V(pos+1)
+                proc_gap = floor(real(pos+1-1)/N_proc(direction)) ! Fortran -> indices start from 1
+                if (rece_rank(proc_gap) == D_rank(direction)) then
+                   Vp(ind,i1,i2)%pter => p_V_bis(pos+1-proc_gap*N_proc(direction), i1,i2)
+                else 
+                   Vp(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap))
+                   pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
+                end if
+             case(2)
+                ! pos = pos_old + 2, which corresponds to an "extension"
+                ! Vm = V(pos)
+                proc_gap = floor(real(pos-1)/N_proc(direction)) 
+                if (rece_rank(proc_gap) == D_rank(direction)) then
+                   Vm(ind,i1,i2)%pter => p_V_bis(pos-proc_gap*N_proc(direction), i1,i2)
+                else 
+                   Vm(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap))
+                   pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
+                end if
+                ! Vp = V(pos+1)
+                proc_gap = floor(real(pos+1-1)/N_proc(direction)) 
+                if (rece_rank(proc_gap) == D_rank(direction)) then
+                   Vp(ind,i1,i2)%pter => p_V_bis(pos+1-proc_gap*N_proc(direction), i1,i2)
+                else 
+                   Vp(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap))
+                   pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
+                end if
+             case default
+                print*, "unexpected case : pos = ", pos, " , pos_old = ", pos_old, &
+                     & " ind = ", ind, " i1 = ", i1, " i2 = ", i2
+             end select
+             pos_old = pos
+          end do ! loop on the particle index inside the current line
+       end do ! loop on the first coordinate (i1) of a line inside the block of lines
+    end do ! loop on the second coordinate (i2) of a line inside the block of lines
+    deallocate(pos_in_buffer)   ! We do not need it anymore
+
+    ! -- Compute the interpolated velocity --
+    ! Check that communications are done
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+       if (rece_rank(proc_gap)/=D_rank(direction)) then
+          call mpi_wait(rece_request(proc_gap), rece_status, ierr)
+       end if
+    end do
+    deallocate(rece_request)
+
+    ! Then compute the field
+    do i2 = 1, gs(2)
+       do i1 = 1, gs(1)
+          do ind = 1, N_proc(direction)
+             p_V(ind,i1,i2) = weight(ind,i1,i2)*Vp(ind,i1,i2)%pter + (1.-weight(ind,i1,i2))*Vm(ind,i1,i2)%pter
+          end do
+       end do
+    end do
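+    ! Worked example (illustration): a particle whose adimensioned midpoint position is 12.25
+    ! has pos = 12 and weight = 0.25, hence p_V = 0.25*V(13) + 0.75*V(12).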
+
+
+    ! ===== Free memory =====
+    ! -- Pointers --
+    do i2 = 1, gs(2)
+       do i1 = 1, gs(1)
+          do ind = 1, N_proc(direction)
+             nullify(Vp(ind,i1,i2)%pter)
+             nullify(Vm(ind,i1,i2)%pter)
+          end do
+       end do
+    end do
+    ! -- MPI internal buffers for non-blocking communication --
+    do proc_gap = send_gap(1,1), send_gap(1,2)
+       if (send_rank(proc_gap) /= D_rank(direction)) then
+          call MPI_WAIT(s_request(proc_gap),rece_status,ierr)
+       end if
+    end do
+    deallocate(s_request)
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+       if (rece_rank(proc_gap) /= D_rank(direction)) then
+          call MPI_WAIT(s_request_bis(proc_gap),rece_status,ierr)
+       end if
+    end do
+    deallocate(s_request_bis)
+    ! -- Deallocate dynamic array --
+    deallocate(V_buffer)
+    deallocate(rece_rank)
+    deallocate(send_rank)
+
+  end subroutine AC_velocity_interpol_group
+
+  !> Determine the set of processes which will send me information during the velocity interpolation and compute
+  !! for each of these processes the range of wanted data.
+  !!    @param[in]    direction       = current direction (1 = along X, 2 = along Y, 3 = along Z)
+  !!    @param[in]    ind_group       = coordinate of the current group of lines
+  !!    @param[out]   send_gap        = gap between my coordinate and the processes of minimal coordinate which will send information to me
+  !!    @param[in]    rece_gap        = gap between my coordinate and the processes of maximal coordinate which will receive information from me
+  !!    @param[in]    rece_gap_abs    = min (resp. max) value of rece_gap(:,:,i) with i=1 (resp. 2)
+  !!    @param[out]   cartography     = cartography(:,proc_gap) contains the set of line indices in the block for which the
+  !!                                    current process requires data from proc_gap and, for each of these lines, the range
+  !!                                    of mesh points from where it requires the velocity values.
+  !! @details
+  !!    Works on a group of lines of size gs(1) x gs(2).
+  !!    Obtain the list of processes which need a part of my local velocity field
+  !!    to interpolate the velocity used in the RK2 scheme to advect their particles.
+  !!    At the same time, it computes, for each process from which I need a part
+  !!    of the velocity field, the range of mesh points where I want data and stores it
+  !!    using some sparse matrix techniques (see the cartography defined in the
+  !!    algorithm documentation).
+  subroutine AC_velocity_determine_communication(direction, ind_group, gs, send_gap,  &
+       & rece_gap, rece_gap_abs, rece_rank, cartography)
+    ! XXX Works only for periodic conditions.
+
+    ! Input/Output
+    integer, intent(in)                                 :: direction
+    integer, dimension(2), intent(in)                   :: ind_group
+    integer, dimension(2), intent(in)                   :: gs
+    integer, dimension(gs(1), gs(2), 2), intent(in)     :: rece_gap
+    integer, dimension(2), intent(in)                   :: rece_gap_abs ! min (resp max) value of rece_gap(:,:,i) with i=1 (resp 2)
+    integer,dimension(rece_gap_abs(1):rece_gap_abs(2))&
+         &, intent(out)                                  :: rece_rank    ! rank of the processes which send me information
+    integer, dimension(2, 2), intent(out)               :: send_gap
+    integer, dimension(2+gs(2)*(2+3*gs(1)), &
+         & rece_gap_abs(1):rece_gap_abs(2)), intent(out) :: cartography
+    ! Others 
+    integer                             :: proc_gap         ! gap between a process coordinate (along the current
+    ! direction) in the mpi-topology and my coordinate
+    integer, dimension(gs(1), gs(2))    :: rece_gapP        ! gap between the coordinate of the previous process (in the current direction)
+    ! and the processes of maximal coordinate which will receive information from it
+    integer, dimension(gs(1), gs(2))    :: rece_gapN        ! same as above but for the next process
+    integer                             :: rankP            ! process rank for shift (P = previous, N = next)
+    integer                             :: send_request_gh  ! mpi communication request (handle) of nonblocking send
+    integer                             :: send_request_gh2 ! mpi communication request (handle) of nonblocking send
+    integer                             :: ierr             ! mpi error code
+    integer, dimension(2)               :: tag_table        ! some mpi message tag
+    logical, dimension(:,:), allocatable:: test_request     ! for mpi non blocking communication
+    integer, dimension(:,:), allocatable:: send_request     ! for mpi non blocking send
+    integer                             :: ind1, ind2       ! indice of the current line inside the group
+    integer,dimension(2)                :: rece_buffer      ! buffer for reception of rece_max
+    integer, dimension(:,:), allocatable:: first, last      ! storage of the processes from which I will be the first (or the last) to receive
+    integer                             :: min_size         ! index in the cartography after which the line indices (along the first dimension of the group) are stored
+    integer                             :: gp_size          ! group size
+    logical                             :: begin_interval   ! are we at the start of an interval?
+    logical                             :: not_myself       ! is the target process myself?
+    integer, dimension(MPI_STATUS_SIZE) :: statut
+
+    send_gap(1,1) = 3*N(direction)
+    send_gap(1,2) = -3*N(direction)
+    send_gap(2,:) = 0
+    gp_size = gs(1)*gs(2)
+
+    ! ===== Communicate with my neighbors -> obtain ghosts ! =====
+    ! Inform my neighbors about the processes from which I need information
+    tag_table = compute_tag(ind_group, tag_obtrec_ghost_NP, direction)
+    call mpi_Isend(rece_gap(1,1,1), gp_size, MPI_INTEGER, neighbors(direction,1), tag_table(1), &
+         & D_comm(direction), send_request_gh, ierr)
+    call mpi_Isend(rece_gap(1,1,2), gp_size, MPI_INTEGER, neighbors(direction,2), tag_table(2), &
+         & D_comm(direction), send_request_gh2, ierr)
+    ! Receive the same message from my neighbors
+    call mpi_recv(rece_gapN(1,1), gp_size, MPI_INTEGER, neighbors(direction,2), tag_table(1), D_comm(direction), statut, ierr)
+    call mpi_recv(rece_gapP(1,1), gp_size, MPI_INTEGER, neighbors(direction,1), tag_table(2), D_comm(direction), statut, ierr)
+
+    ! ===== Compute if I am first or last and determine the cartography =====
+    min_size = 2 + gs(2)
+    ! Initialize first and last to determine if I am the first or the last process (considering the current direction)
+    ! to require information from each process
+    allocate(first(2,rece_gap_abs(1):rece_gap_abs(2)))
+    first(2,:) = 0  ! number of lines for which I am the first
+    allocate(last(2,rece_gap_abs(1):rece_gap_abs(2)))
+    last(2,:) = 0   ! number of lines for which I am the last
+    ! Initialize cartography
+    cartography(1,:) = 0            ! number of velocity values to receive
+    cartography(2,:) = min_size     ! number of elements to send when sending the cartography
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+       first(1,proc_gap) = -proc_gap
+       last(1,proc_gap) = -proc_gap
+       call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rece_rank(proc_gap), ierr)
+       not_myself = (rece_rank(proc_gap) /= D_rank(direction)) ! Is the target processus myself ?
+       do ind2 = 1, gs(2)
+          cartography(2+ind2,proc_gap) = 0    ! 2 x number of intervals of concerned lines in the column i2
+          begin_interval = .true.
+          do ind1 = 1, gs(1)
+             ! Does proc_gap belong to [rece_gap(i1,i2,1);rece_gap(i1,i2,2)]?
+             if((proc_gap>=rece_gap(ind1,ind2,1)).and.(proc_gap<=rece_gap(ind1,ind2,2))) then
+                ! Compute if I am the first.
+                if (proc_gap>rece_gapP(ind1,ind2)-1) then
+                   first(2,proc_gap) =  first(2,proc_gap)+1
+                end if
+                ! Compute if I am the last.
+                if (proc_gap<rece_gapN(ind1,ind2)+1) then
+                   last(2,proc_gap) =  last(2,proc_gap)+1
+                end if
+                ! Update cartography // Not needed if the target process is myself
+                if (not_myself) then
+                   if (begin_interval) then 
+                      cartography(2+ind2,proc_gap) =  cartography(2+ind2,proc_gap)+2
+                      cartography(cartography(2,proc_gap)+1,proc_gap) = ind1
+                      cartography(2,proc_gap) = cartography(2,proc_gap) + 2
+                      cartography(cartography(2,proc_gap),proc_gap) = ind1
+                      begin_interval = .false.
+                   else
+                      cartography(cartography(2,proc_gap),proc_gap) = ind1
+                   end if
+                end if
+             else
+                begin_interval = .true.
+             end if
+          end do
+       end do
+    end do
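+    ! Illustrative example (assumed line set): if, in column ind2, the concerned lines are
+    ! i1 = 2, 3, 4 and 7, the loop above stores two intervals: cartography(2+ind2,proc_gap) = 4
+    ! and the pairs (2,4) and (7,7) are appended after the per-column counters.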
+
+    ! ===== Send information about first and last  =====
+    tag_table = compute_tag(ind_group, tag_obtrec_NP, direction)
+    allocate(send_request(rece_gap_abs(1):rece_gap_abs(2),2))
+    allocate(test_request(rece_gap_abs(1):rece_gap_abs(2),2))
+    test_request = .false.
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+       ! I am the first ?
+       if (first(2,proc_gap)>0) then
+          if(rece_rank(proc_gap)/= D_rank(direction)) then
+             call mpi_Isend(first(1,proc_gap), 2, MPI_INTEGER, rece_rank(proc_gap), tag_table(1), D_comm(direction), &
+                  & send_request(proc_gap,1), ierr)
+             test_request(proc_gap,1) = .true.
+          else
+             send_gap(1,1) = min(send_gap(1,1), -proc_gap)
+             send_gap(2,1) = send_gap(2,1) + first(2,proc_gap)
+          end if
+       end if
+       ! I am the last ?
+       if (last(2,proc_gap)>0) then
+          if(rece_rank(proc_gap)/= D_rank(direction)) then
+             call mpi_Isend(last(1,proc_gap), 2, MPI_INTEGER, rece_rank(proc_gap), tag_table(2), D_comm(direction), &
+                  & send_request(proc_gap,2), ierr)
+             test_request(proc_gap,2) = .true.
+          else
+             send_gap(1,2) = max(send_gap(1,2), -proc_gap)
+             send_gap(2,2) = send_gap(2,2) + last(2,proc_gap)
+          end if
+       end if
+    end do
+
+
+
+    ! ===== Receive information from the first and the last processes which need a part of my local velocity field =====
+    do while(send_gap(2,1) < gp_size)
+       call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
+       send_gap(1,1) = min(send_gap(1,1), rece_buffer(1))
+       send_gap(2,1) = send_gap(2,1) + rece_buffer(2)
+    end do
+    do while(send_gap(2,2) < gp_size)
+       call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
+       send_gap(1,2) = max(send_gap(1,2), rece_buffer(1))
+       send_gap(2,2) = send_gap(2,2) + rece_buffer(2)
+    end do
+
+    ! ===== Free Isend buffer =====
+    call MPI_WAIT(send_request_gh,statut,ierr)
+    call MPI_WAIT(send_request_gh2,statut,ierr)
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+       if (test_request(proc_gap,1)) call MPI_WAIT(send_request(proc_gap,1),statut,ierr)
+       if (test_request(proc_gap,2)) call MPI_WAIT(send_request(proc_gap,2),statut,ierr)
+    end do
+    deallocate(send_request)
+    deallocate(test_request)
+
+  end subroutine AC_velocity_determine_communication
+
+
+  ! ===================================================================================================
+  ! ====================     Others than velocity interpolation and remeshing      ====================
+  ! ===================================================================================================
+
+  !> Determine the type (center or left) of each block and the tags, for a complete group of
+  !! lines, at the points where corrected remeshing formulas are required.
+  !!    @param[in]        dt          = time step
+  !!    @param[in]        dir         = current direction (1 = along X, 2 = along Y and 3 = along Z)
+  !!    @param[in]        gp_s        = size of a group (ie the number of lines it gathers along the two other directions)
+  !!    @param[in]        ind_group   = coordinate of the current group of lines
+  !!    @param[in]        p_V         = particle velocity (along the current direction)
+  !!    @param[out]       bl_type     = table of block types (center or left)
+  !!    @param[out]       bl_tag      = informs about tagged particles (bl_tag(ind_bl)=1 if the end of the bl_ind-th block
+  !!                                    and the beginning of the following one are tagged)
+  !! @details
+  !!        This subroutine works on a group of lines. For each line of this group, it
+  !!    determines the type of each block of this line and where corrected remeshing
+  !!    formulas are required. At those points, it tags the block transition (ie the end of
+  !!    the current block and the beginning of the following one) in order to indicate
+  !!    that corrected weights have to be used during the remeshing.
+  subroutine AC_type_and_block_group(dt, dir, gp_s, ind_group, p_V,bl_type, bl_tag)
+
+    real(WP), intent(in)                                        :: dt           ! time step
+    integer, intent(in)                                         :: dir
+    integer, dimension(2),intent(in)                            :: gp_s         ! group size
+    integer, dimension(2), intent(in)                           :: ind_group    ! group index
+    real(WP), dimension(:,:,:), intent(in)                      :: p_V
+    logical,dimension(bl_nb(dir)+1,gp_s(1),gp_s(2)),intent(out) :: bl_type      ! is the particle block a center block or a left one ?
+    logical,dimension(bl_nb(dir),gp_s(1),gp_s(2)),intent(out)   :: bl_tag       ! flags the tagged particles
+
+    real(WP),dimension(bl_nb(dir)+1,gp_s(1),gp_s(2))            :: bl_lambdaMin ! for a particle, lambda = V*dt/dx ;  bl_lambdaMin = min of
+    ! lambda on a block (also taking into account the first following particle)
+    real(WP),dimension(gp_s(1),gp_s(2))                         :: lambP, lambN ! buffers to exchange some lambda min with other processes
+    real(WP),dimension(gp_s(1),gp_s(2))                         :: lambB, lambE ! min value of lambda at the beginning and at the end of the line
+    integer, dimension(bl_nb(dir)+1,gp_s(1),gp_s(2))            :: bl_ind       ! block index : integer as lambda in (bl_ind,bl_ind+1) for a left block
+    ! and lambda in (bl_ind-1/2, bl_ind+1/2) for a right block 
+    integer                                                     :: ind,i_p      ! some indices
+    real(WP)                                                    :: cfl          ! = dt/d_sc(dir)
+    integer, dimension(2)                                       :: send_request ! mpi status of nonblocking send
+    integer, dimension(2)                                       :: rece_request ! mpi status of nonblocking receive
+    integer, dimension(MPI_STATUS_SIZE)                         :: rece_status  ! mpi status (for mpi_wait)
+    integer, dimension(MPI_STATUS_SIZE)                         :: send_status  ! mpi status (for mpi_wait)
+    integer, dimension(2)                                       :: tag_table    ! other tags for mpi message
+    integer                                                     :: ierr         ! mpi error code
+
+    ! ===== Initialisation =====
+    cfl = dt/d_sc(dir)
+
+    ! ===== Compute bl_lambdaMin =====
+
+    ! -- For the first block (1/2) --
+    ! The domain contains only its second half => exchange ghosts with the previous process
+    lambB = minval(p_V(1:(bl_size/2)+1,:,:),1)*cfl
+    tag_table = compute_tag(ind_group, tag_part_tag_NP, dir)
+    ! Send message
+    call mpi_Isend(lambB(1,1), gp_s(1)*gp_s(2), MPI_DOUBLE_PRECISION, &
+         & neighbors(dir,1), tag_table(1), D_comm(dir), send_request(1), ierr)
+    ! Receive it
+    call mpi_Irecv(lambN(1,1), gp_s(1)*gp_s(2), MPI_DOUBLE_PRECISION, &
+         & neighbors(dir,2), tag_table(1), D_comm(dir), rece_request(1), ierr) 
+
+    ! -- For the last block (1/2) --
+    ! The process contains only its first half => exchange ghosts with the next process
+    ind = bl_nb(dir) + 1
+    lambE = minval(p_V(N_proc(dir) - (bl_size/2)+1 :N_proc(dir),:,:),1)*cfl
+    ! Send message
+    call mpi_Isend(lambE(1,1), gp_s(1)*gp_s(2), MPI_DOUBLE_PRECISION, &
+         & neighbors(dir,2), tag_table(2), D_comm(dir), send_request(2), ierr)
+    ! Receive it
+    call mpi_Irecv(lambP(1,1), gp_s(1)*gp_s(2), MPI_DOUBLE_PRECISION, &
+         &  neighbors(dir,1), tag_table(2), D_comm(dir), rece_request(2), ierr) 
+
+    ! -- For the "middle" block --
+    do ind = 2, bl_nb(dir)
+       i_p = ((ind-1)*bl_size) + 1 - bl_size/2
+       bl_lambdaMin(ind,:,:) = minval(p_V(i_p:i_p+bl_size,:,:),1)*cfl
+    end do
+
+    ! -- For the first block (1/2) --
+    ! The domain contains only its second half => use exchanged ghost
+    ! Check reception
+    call mpi_wait(rece_request(2), rece_status, ierr)
+    bl_lambdaMin(1,:,:) = min(lambB(:,:), lambP(:,:))
+
+    ! -- For the last block (1/2) --
+    ! The processus contains only its first half => use exchanged ghost
+    ! Check reception
+    call mpi_wait(rece_request(1), rece_status, ierr)
+    ind = bl_nb(dir) + 1
+    bl_lambdaMin(ind,:,:) = min(lambE(:,:), lambN(:,:))
+
+    ! ===== Compute block type and index =====
+    bl_ind = nint(bl_lambdaMin)
+    bl_type = (bl_lambdaMin<dble(bl_ind))
+
+    ! ===== Tag particles =====
+
+    do ind = 1, bl_nb(dir)
+       bl_tag(ind,:,:) = ((bl_ind(ind,:,:)/=bl_ind(ind+1,:,:)) .and. &
+            & (bl_type(ind,:,:).neqv.bl_type(ind+1,:,:)))
+    end do
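+    ! Worked example (illustration): bl_lambdaMin = 1.8 gives bl_ind = 2 and bl_type = .true.
+    ! (1.8 < 2.0), whereas bl_lambdaMin = 2.2 gives bl_ind = 2 and bl_type = .false. ;
+    ! two consecutive blocks are tagged only if both their bl_ind and their bl_type differ.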
+
+    call mpi_wait(send_request(1), send_status, ierr)
+    call mpi_wait(send_request(2), send_status, ierr)
+
+  end subroutine AC_type_and_block_group
+
+  ! ============================================================================
+  ! ====================     Tools to remesh particles      ====================
+  ! ============================================================================
+
+  !> Determine the set of processes which will send me information during the remeshing
+  !! and compute for each of these processes the range of wanted data. Use implicit
+  !! computation rather than communication (only possible if particles are gathered by
+  !! blocks with constraints on velocity variation - as in the corrected lambda formulas) -
+  !! works directly on a group of particle lines.
+  !     @param[in]    send_group_min  = minimal mesh index involved in the remeshing of the particles in my local subdomain
+  !     @param[in]    send_group_max  = maximal mesh index involved in the remeshing of the particles in my local subdomain
+  !!    @param[in]    direction       = current direction (1 = along X, 2 = along Y, 3 = along Z)
+  !!    @param[in]    ind_group       = coordinate of the current group of lines
+  !!    @param[out]   proc_min        = gap between my coordinate and the processes of minimal coordinate which will receive information from me
+  !!    @param[out]   proc_max        = gap between my coordinate and the processes of maximal coordinate which will receive information from me
+  !!    @param[out]   rece_gap        = coordinate range of processes which will send me information during the remeshing.
+  !!    @param[in]    gs              = size of the group of lines along the current direction
+  !! @details
+  !!    Works on a group of lines of size gs(1) x gs(2).
+  !!    Obtain the list of processes which are associated to the sub-domains where my particles
+  !!    will be remeshed and the list of processes which contain particles which
+  !!    have to be remeshed in my sub-domain. This way, this procedure determines
+  !!    which processes need to communicate together in order to proceed to the
+  !!    remeshing (as in a parallel context the real space is subdivided and each
+  !!    process contains a part of it).
+  !!        At the same time, it computes, for each process with which I will
+  !!    communicate, the range of mesh points involved for each line of particles
+  !!    inside the group, and it stores it using some sparse matrix techniques
+  !!    (see the cartography defined in the algorithm documentation).
+  !!        This routine does not involve any communication to determine if
+  !!    a process is the first or the last one (considering its coordinate along
+  !!    the current direction) to send remeshing information to a given process.
+  !!    It directly computes it using constraints on velocity (as in the corrected lambda
+  !!    scheme). When possible, use it rather than AC_obtain_senders_com.
+  subroutine AC_remesh_determine_communication(direction, gs, ind_group, rece_gap, send_gap, send_gap_abs, send_rank, cartography)
+    ! XXX Works only for periodic conditions. For Dirichlet conditions: it is
+    ! possible that rece_gap(1), rece_gap(2), or both are never received
+    ! => detect it (track the first and the last particles) and deal with it.
+
+    ! Input/output
+    integer, intent(in)                                 :: direction
+    integer, dimension(2), intent(in)                   :: gs           ! group size
+    integer, dimension(2), intent(in)                   :: ind_group
+    integer, dimension(2, 2), intent(out)               :: rece_gap
+    integer(kind=4), dimension(gs(2),gs(1),2)           :: send_gap     ! minimal and maximal processes which contain the sub-domains where my
+    ! particles will be remeshed, for each line of the line group
+    integer, dimension(2), intent(in)                   :: send_gap_abs ! min and max processes which contain the sub-domains where my particles will be remeshed.
+    integer,dimension(send_gap_abs(1):send_gap_abs(2))&
+         &, intent(out)                                  :: send_rank    ! rank of the processes to which I will send data
+    integer, dimension(2+gs(2)*(2+3*gs(1)), &
+         & send_gap_abs(1):send_gap_abs(2)), intent(out) :: cartography
+    ! Other local variable
+    integer(kind=4)                                 :: proc_gap         ! gap between a process coordinate (along the current
+    ! direction) in the mpi-topology and my coordinate
+    integer                                         :: rankP            ! process rank for shift (P = previous, N = next)
+    integer, dimension(2)                           :: tag_table        ! mpi message tag (for communicate rece_gap(1) and rece_gap(2))
+    integer, dimension(:,:), allocatable            :: first, last      ! storage of the processes to which I will be the first (or the last) to send
+    ! remeshed particles
+    integer, dimension(2)                           :: first_condition  ! allowed range of value of proc_min and proc_max for being the first
+    integer, dimension(2)                           :: last_condition   ! allowed range of value of proc_min and proc_max for being the last
+    integer, dimension(:,:),allocatable             :: send_request     ! mpi status of nonblocking send
+    integer                                         :: ierr             ! mpi error code
+    integer, dimension(MPI_STATUS_SIZE)             :: statut           ! mpi status
+    integer                                         :: ind1, ind2       ! indice of the current line inside the group
+    integer                                         :: min_size         ! index in the cartography after which the line indices (along the first dimension of the group) are stored
+    integer                                         :: gp_size          ! group size
+    integer,dimension(2)                            :: rece_buffer      ! buffer for reception of rece_max
+    logical                                         :: begin_interval   ! are we at the start of an interval?
+
+    rece_gap(1,1) = 3*N(direction)
+    rece_gap(1,2) = -3*N(direction)
+    rece_gap(2,:) = 0
+    gp_size = gs(1)*gs(2)
+
+    allocate(send_request(send_gap_abs(1):send_gap_abs(2),3))
+    send_request(:,3) = 0
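+    ! send_request(:,3) records which non-blocking sends are posted for each gap:
+    ! +1 when the "first" message is sent, +2 when the "last" one is, so its value
+    ! (0, 1, 2 or 3) drives the mpi_wait calls at the end of the routine.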
+
+    ! ===== Compute if I am first or last and determine the cartography =====
+    min_size = 2 + gs(2)
+    ! Initialize first and last to determine if I am the first or the last process (considering the current direction)
+    ! to send remeshing information to each process
+    allocate(first(2,send_gap_abs(1):send_gap_abs(2)))
+    first(2,:) = 0  ! number of lines for which I am the first
+    allocate(last(2,send_gap_abs(1):send_gap_abs(2)))
+    last(2,:) = 0   ! number of lines for which I am the last
+    ! Initialize cartography
+    cartography(1,:) = 0            ! number of velocity values to receive
+    cartography(2,:) = min_size     ! number of elements to send when sending the cartography
+    ! And compute cartography, first and last !
+    do proc_gap = send_gap_abs(1), send_gap_abs(2)
+       first(1,proc_gap) = -proc_gap
+       last(1,proc_gap) = -proc_gap
+       first_condition(2) = proc_gap*N_proc(direction)+1
+       first_condition(1) = 1-2*bl_bound_size + first_condition(2)
+       last_condition(2)  = (proc_gap+1)*N_proc(direction)
+       last_condition(1)  = -1+2*bl_bound_size + last_condition(2)
+       call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank(proc_gap), ierr)
+       do ind2 = 1, gs(2)
+          cartography(2+ind2,proc_gap) = 0    ! 2 x number of intervals of concerned lines in the column i2
+          begin_interval = .true.
+          do ind1 = 1, gs(1)
+             ! Does proc_gap belong to [send_gap(i1,i2,1);send_gap(i1,i2,2)]?
+             if((proc_gap>=send_gap(ind1,ind2,1)).and.(proc_gap<=send_gap(ind1,ind2,2))) then
+                ! Compute if I am the first.
+                if ((send_group_min(ind1,ind2)< first_condition(1)).AND. &
+                     & (send_group_max(ind1,ind2)>= first_condition(2))) then
+                   first(2,proc_gap) =  first(2,proc_gap)+1
+                end if
+                ! Compute if I am the last.
+                if ((send_group_max(ind1,ind2) > last_condition(1)) &
+                     & .AND.(send_group_min(ind1,ind2)<= last_condition(2))) then
+                   last(2,proc_gap) =  last(2,proc_gap)+1
+                end if
+                ! Update cartography // Needed even if the target process is myself, as we use a buffer in all cases (the scalar field cannot be used directly during the remeshing)
+                if (begin_interval) then 
+                   cartography(2+ind2,proc_gap) =  cartography(2+ind2,proc_gap)+2
+                   cartography(cartography(2,proc_gap)+1,proc_gap) = ind1
+                   cartography(2,proc_gap) = cartography(2,proc_gap) + 2
+                   cartography(cartography(2,proc_gap),proc_gap) = ind1
+                   begin_interval = .false.
+                else
+                   cartography(cartography(2,proc_gap),proc_gap) = ind1
+                end if
+             else
+                begin_interval = .true.
+             end if
+          end do
+       end do
+    end do
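+    ! Illustrative example (assuming N_proc(direction) = 16 and bl_bound_size = 1):
+    ! for proc_gap = 1, first_condition = (/16, 17/) and last_condition = (/33, 32/), so a
+    ! line is counted in "first" when send_group_min < 16 and send_group_max >= 17, and in
+    ! "last" when send_group_max > 33 and send_group_min <= 32.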
+
+    ! ===== Send information about first and last  =====
+    tag_table = compute_tag(ind_group, tag_obtsend_NP, direction)
+    do proc_gap = send_gap_abs(1), send_gap_abs(2)
+       ! I am the first ?
+       if (first(2,proc_gap)>0) then
+          if(send_rank(proc_gap)/= D_rank(direction)) then
+             call mpi_Isend(first(1,proc_gap), 2, MPI_INTEGER, send_rank(proc_gap), tag_table(1), D_comm(direction), &
+                  & send_request(proc_gap,1), ierr)
+             send_request(proc_gap,3) = 1
+          else
+             rece_gap(1,1) = min(rece_gap(1,1), -proc_gap)
+             rece_gap(2,1) = rece_gap(2,1) + first(2,proc_gap)
+          end if
+       end if
+       ! I am the last ?
+       if (last(2,proc_gap)>0) then
+          if(send_rank(proc_gap)/= D_rank(direction)) then
+             call mpi_Isend(last(1,proc_gap), 2, MPI_INTEGER, send_rank(proc_gap), tag_table(2), D_comm(direction), &
+                  & send_request(proc_gap,2), ierr)
+             send_request(proc_gap,3) = send_request(proc_gap, 3) + 2
+          else
+             rece_gap(1,2) = max(rece_gap(1,2), -proc_gap)
+             rece_gap(2,2) = rece_gap(2,2) + last(2,proc_gap)
+          end if
+       end if
+    end do
+
+    ! ===== Receive information from the first and the last processes which need a part of my local velocity field =====
+    do while(rece_gap(2,1) < gp_size)
+       call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
+       rece_gap(1,1) = min(rece_gap(1,1), rece_buffer(1))
+       rece_gap(2,1) = rece_gap(2,1) + rece_buffer(2)
+    end do
+    do while(rece_gap(2,2) < gp_size)
+       call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
+       rece_gap(1,2) = max(rece_gap(1,2), rece_buffer(1))
+       rece_gap(2,2) = rece_gap(2,2) + rece_buffer(2)
+    end do
+
+    ! ===== Free Isend buffer =====
+    do proc_gap = send_gap_abs(1), send_gap_abs(2)
+       select case (send_request(proc_gap,3))
+       case (3)
+          call mpi_wait(send_request(proc_gap,1), statut, ierr)
+          call mpi_wait(send_request(proc_gap,2), statut, ierr)
+       case (2)
+          call mpi_wait(send_request(proc_gap,2), statut, ierr)
+       case (1)
+          call mpi_wait(send_request(proc_gap,1), statut, ierr)
+       end select
+    end do
+
+  end subroutine AC_remesh_determine_communication
+
+
+  !> Determine the set of processes which will send me information during the remeshing
+  !! and compute for each of these processes the range of wanted data. Use implicit
+  !! computation rather than communication (only possible if particles are gathered by
+  !! blocks with constraints on velocity variation - as in the corrected lambda formulas) -
+  !! works directly on a group of particle lines.
+  !     @param[in]    send_group_min  = minimal mesh index involved in the remeshing of the particles in my local subdomain
+  !     @param[in]    send_group_max  = maximal mesh index involved in the remeshing of the particles in my local subdomain
+  !!    @param[in]    direction       = current direction (1 = along X, 2 = along Y, 3 = along Z)
+  !!    @param[in]    ind_group       = coordinate of the current group of lines
+  !!    @param[out]   proc_min        = gap between my coordinate and the processes of minimal coordinate which will receive information from me
+  !!    @param[out]   proc_max        = gap between my coordinate and the processes of maximal coordinate which will receive information from me
+  !!    @param[out]   rece_proc       = coordinate range of processes which will send me information during the remeshing.
+  !!    @param[in]    gp_s            = size of the group of lines along the current direction
+  !! @details
+  !!    Works on a group of lines of size gs(1) x gs(2).
+  !!    Obtain the list of processes which are associated to the sub-domains where my particles
+  !!    will be remeshed and the list of processes which contain particles which
+  !!    have to be remeshed in my sub-domain. This way, this procedure determines
+  !!    which processes need to communicate together in order to proceed to the
+  !!    remeshing (as in a parallel context the real space is subdivided and each
+  !!    process contains a part of it).
+  !!        At the same time, it computes, for each process with which I will
+  !!    communicate, the range of mesh points involved for each line of particles
+  !!    inside the group, and it stores it using some sparse matrix techniques
+  !!    (see the cartography defined in the algorithm documentation).
+  !!        This routine does not involve any communication to determine if
+  !!    a process is the first or the last one (considering its coordinate along
+  !!    the current direction) to send remeshing information to a given process.
+  !!    It directly computes it using constraints on velocity (as in the corrected lambda
+  !!    scheme). When possible, use it rather than AC_obtain_senders_com.
+  subroutine AC_obtain_senders_group(direction, gp_s, ind_group, proc_min, proc_max, rece_proc)
+    ! XXX Works only for periodic conditions. For Dirichlet conditions: it is
+    ! possible that rece_proc(1), rece_proc(2), or both are never received
+    ! => detect it (track the first and the last particles) and deal with it.
+
+    ! Input/output
+    integer, intent(in)                             :: direction
+    integer, dimension(2), intent(in)               :: ind_group
+    integer(kind=4), dimension(:,:), intent(out)    :: proc_min, proc_max
+    integer, dimension(:,:,:), intent(out)          :: rece_proc
+    integer, dimension(2), intent(in)               :: gp_s
+    ! Other local variable
+    integer(kind=4)                                 :: proc_gap         ! gap between a process coordinate (along the current
+    ! direction) in the mpi-topology and my coordinate
+    integer                                         :: rankP, rankN     ! process rank for shift (P = previous, N = next)
+    integer, dimension(2)                           :: tag_table        ! mpi message tag (for communicate rece_proc(1) and rece_proc(2))
+    integer                                         :: proc_max_abs     ! maximum of proc_max array
+    integer                                         :: proc_min_abs     ! minimum of proc_min array
+    integer, dimension(:,:), allocatable            :: first, last      ! storage of the processes to which I will be the first (or the last) to send
+    ! remeshed particles
+    integer, dimension(2)                           :: first_condition  ! allowed range of value of proc_min and proc_max for being the first
+    integer, dimension(2)                           :: last_condition   ! allowed range of value of proc_min and proc_max for being the last
+    integer, dimension(:,:),allocatable             :: send_request     ! mpi status of nonblocking send
+    integer                                         :: ierr             ! mpi error code
+    integer, dimension(MPI_STATUS_SIZE)             :: statut           ! mpi status
+    integer                                         :: ind1, ind2       ! indice of the current line inside the group
+    integer                                         :: min_size         ! begin index in first and last after which the line indices (along the first dimension of the group) are stored
+    integer                                         :: indice           ! internal indice
+    integer, dimension(1 + gp_s(2)*(1+gp_s(1)))         :: rece_buffer      ! buffer for reception of rece_max
+
+    rece_proc = 3*N(direction)
+
+    proc_min = floor(real(send_group_min-1)/N_proc(direction))
+    proc_max = floor(real(send_group_max-1)/N_proc(direction))
+    proc_min_abs = minval(proc_min)
+    proc_max_abs = maxval(proc_max)
+
+    allocate(send_request(proc_min_abs:proc_max_abs,3))
+    send_request(:,3) = 0
+
+    ! -- Determine if I am the first or the last to send information to a given
+    ! process and sort lines by target processes for which I am the first and
+    ! for which I am the last. --
+    tag_table = compute_tag(ind_group, tag_obtsend_NP, direction)
+    min_size = 2 + gp_s(2)
+    allocate(first(2*(gp_s(1)*gp_s(2)+1),proc_min_abs:proc_max_abs))
+    first(1,:) = min_size
+    allocate(last(2*(gp_s(1)*gp_s(2)+1),proc_min_abs:proc_max_abs))
+    last(1,:) = min_size
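+    ! Layout of first(:,proc_gap) (and of last, built the same way) in this routine:
+    ! element 1 is the current fill index (initialised to min_size), element 2 holds -proc_gap,
+    ! elements 3 to 2+gp_s(2) count the concerned lines per column, and the i1 indices of
+    ! those lines are appended afterwards.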
+    do proc_gap = proc_min_abs, proc_max_abs
+       first(2,proc_gap) = -proc_gap
+       last(2,proc_gap) = -proc_gap
+       first_condition(2) = proc_gap*N_proc(direction)+1
+       first_condition(1) = 1-2*bl_bound_size + first_condition(2)
+       last_condition(2)  = (proc_gap+1)*N_proc(direction)
+       last_condition(1)  = -1+2*bl_bound_size + last_condition(2)
+       do ind2 = 1, gp_s(2)
+          first(2+ind2,proc_gap) = 0
+          last(2+ind2,proc_gap) =  0
+          do ind1 = 1, gp_s(1)
+             ! Compute if I am the first.
+             if ((send_group_min(ind1,ind2)< first_condition(1)).AND. &
+                  & (send_group_max(ind1,ind2)>= first_condition(2))) then
+                first(2+ind2,proc_gap) =  first(2+ind2,proc_gap)+1
+                first(1,proc_gap) = first(1,proc_gap) + 1
+                first(first(1,proc_gap),proc_gap) = ind1
+             end if
+             ! Compute if I am the last.
+             if ((send_group_max(ind1,ind2) > last_condition(1)) &
+                  & .AND.(send_group_min(ind1,ind2)<= last_condition(2))) then
+                last(2+ind2,proc_gap) =  last(2+ind2,proc_gap)+1
+                last(1,proc_gap) = last(1,proc_gap) + 1
+                last(last(1,proc_gap),proc_gap) = ind1
+             end if
+          end do
+       end do
+    end do
+
+
+    ! -- Send information if I am the first or the last --
+    do proc_gap = proc_min_abs, proc_max_abs
+       ! I am the first ?
+       if (first(1,proc_gap)>min_size) then
+          ! Compute the rank of the target processus
+          call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rankN, ierr)
+          if(rankN /= D_rank(direction)) then
+             call mpi_Isend(first(2,proc_gap), first(1,proc_gap)-1, MPI_INTEGER, rankN, tag_table(1), D_comm(direction), &
+                  & send_request(proc_gap,1), ierr)
+             send_request(proc_gap,3) = 1
+          else
+             indice = min_size
+             do ind2 = 1, gp_s(2)
+                do ind1 = 1, first(2+ind2,proc_gap)
+                   indice = indice+1
+                   rece_proc(1,first(indice,proc_gap),ind2) = -proc_gap
+                end do
+             end do
+          end if
+       end if
+       ! I am the last ?
+       if (last(1,proc_gap)>min_size) then
+          ! Compute the rank of the target processus
+          call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rankN, ierr)
+          if(rankN /= D_rank(direction)) then
+             call mpi_Isend(last(2,proc_gap), last(1,proc_gap)-1, MPI_INTEGER, rankN, tag_table(2), D_comm(direction), &
+                  & send_request(proc_gap,2), ierr)
+             send_request(proc_gap,3) = send_request(proc_gap, 3) + 2
+          else
+             indice = min_size
+             do ind2 = 1, gp_s(2)
+                do ind1 = 1, last(2+ind2,proc_gap)
+                   indice = indice+1
+                   rece_proc(2,last(indice,proc_gap),ind2) = -proc_gap
+                end do
+             end do
+          end if
+       end if
+    end do
+
+    ! -- Receive it --
+    ! size_max = size(rece_buffer) ! 2 + 2*gp_s(1)*gp_s(2)
+    do while(any(rece_proc(1,:,:) == 3*N(direction)))
+       call mpi_recv(rece_buffer(1), size(rece_buffer), MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
+       indice = min_size-1
+       do ind2 = 1, gp_s(2)
+          do ind1 = 1, rece_buffer(1+ind2)
+             indice = indice+1
+             rece_proc(1,rece_buffer(indice),ind2) = rece_buffer(1)
+          end do
+       end do
+    end do
+    do while(any(rece_proc(2,:,:) == 3*N(direction)))
+       call mpi_recv(rece_buffer(1), size(rece_buffer), MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
+       indice = min_size-1
+       do ind2 = 1, gp_s(2)
+          do ind1 = 1, rece_buffer(1+ind2)
+             indice = indice+1
+             rece_proc(2,rece_buffer(indice),ind2) = rece_buffer(1)
+          end do
+       end do
+    end do
+
+    ! -- Free Isend buffer --
+    do proc_gap = proc_min_abs, proc_max_abs
+       select case (send_request(proc_gap,3))
+       case (3)
+          call mpi_wait(send_request(proc_gap,1), statut, ierr)
+          call mpi_wait(send_request(proc_gap,2), statut, ierr)
+       case (2)
+          call mpi_wait(send_request(proc_gap,2), statut, ierr)
+       case (1)
+          call mpi_wait(send_request(proc_gap,1), statut, ierr)
+       end select
+    end do
+
+  end subroutine AC_obtain_senders_group
+
+
+  !> Determine the set of processes which will send me information during the
+  !!  scalar remeshing in an explicit (and expensive) way: communications!
+  !!    @param[in]    direction   = current direction (1 = along X, 2 = along Y, 3 = along Z)
+  !!    @param[in]    ind_group   = coordinate of the current group of lines
+  !!    @param[out]   proc_min    = gap between my coordinate and the processes of minimal coordinate which will receive information from me
+  !!    @param[out]   proc_max    = gap between my coordinate and the processes of maximal coordinate which will receive information from me
+  !!    @param[out]   rece_proc   = coordinate range of processes which will send me information during the remeshing.
+  !!    @param[in]    gp_s        = size of the group of lines along the current direction
+  !!    @param[in]    com         = integer used to distinguish this function from AC_obtain_senders_group.
+  !! @details
+  !!    Obtain the list of processes which contain some particles which belong to
+  !!    my subdomain after their advection (and thus which will be remeshed into
+  !!    my subdomain). This result is returned as an interval [send_min; send_max].
+  !!    All the processes whose coordinate (in the current direction) belongs to
+  !!    this interval are involved in the scalar remeshing in the current
+  !!    subdomain. Use this method when the senders are not predictable without
+  !!    communication, as in M'6 schemes for instance. More precisely, it
+  !!    corresponds to schemes without blocks of particles involving velocity variation
+  !!    constraints to prevent the distance between two particles from growing (or
+  !!    diminishing) too much.
+  subroutine AC_obtain_senders_com(direction, gp_s, ind_group, proc_min, proc_max, rece_proc, com)
+    ! XXX Works only for periodic conditions. See AC_obtain_senders. Adapting it to
+    ! other conditions should be easier.
+
+    ! Input/output
+    integer, intent(in)                             :: direction
+    integer, dimension(2), intent(in)               :: ind_group
+    integer(kind=4), dimension(:,:), intent(out)    :: proc_min, proc_max
+    integer, dimension(:,:,:), intent(out)          :: rece_proc
+    integer, dimension(2), intent(in)               :: gp_s
+    integer, intent(in)                             :: com
+    ! Other local variable
+    integer(kind=4)                                 :: proc_gap         ! gap between a process coordinate (along the current
+    ! direction) in the mpi-topology and my coordinate
+    integer                                         :: rankP, rankN     ! process rank for shift (P = previous, N = next)
+    integer, dimension(2)                           :: tag_table        ! mpi message tag (for communicate rece_proc(1) and rece_proc(2))
+    integer, dimension(gp_s(1), gp_s(2))            :: proc_max_prev    ! maximum gap between the previous process and the receivers of its remeshing buffer
+    integer, dimension(gp_s(1), gp_s(2))            :: proc_min_next    ! minimum gap between the next process and the receivers of its remeshing buffer
+    integer                                         :: proc_max_abs     ! maximum of proc_max array
+    integer                                         :: proc_min_abs     ! minimum of proc_min array
+    integer, dimension(:,:), allocatable            :: first, last      ! storage of the processes to which I will be the first (or the last) to send
+    ! remeshed particles
+    integer, dimension(:,:),allocatable             :: send_request     ! mpi status of nonblocking send
+    integer, dimension(2)                           :: send_request_gh  ! mpi status of nonblocking send (when exchanging "ghost")
+    integer                                         :: ierr             ! mpi error code
+    integer, dimension(MPI_STATUS_SIZE)             :: statut           ! mpi status
+    integer                                         :: ind1, ind2       ! indice of the current line inside the group
+    integer                                         :: nb
+    integer, dimension(2 + 2*gp_s(1)*gp_s(2))       :: rece_buffer      ! buffer for reception of rece_max
+
+
+    rece_proc = 3*N(direction)
+
+    proc_min = floor(real(send_group_min-1)/N_proc(direction))
+    proc_max = floor(real(send_group_max-1)/N_proc(direction))
+    proc_min_abs = minval(proc_min)
+    proc_max_abs = maxval(proc_max)
+
+    allocate(send_request(proc_min_abs:proc_max_abs,3))
+    send_request(:,3) = 0
+
+    ! -- Exchange send_block_min and send_block_max to determine if I am the first
+    ! or the last to send information to a given target process. --
+    ! Compute message tag - we re-use the tag_part_tag_NP id as using this procedure
+    ! supposes not using "AC_type_and_block"
+    tag_table = compute_tag(ind_group, tag_part_tag_NP, direction)
+    ! Send "ghost"
+    call mpi_Isend(proc_min(1,1), gp_s(1)*gp_s(2), MPI_INTEGER, neighbors(direction,1), tag_table(1), &
+         & D_comm(direction), send_request_gh(1), ierr)
+    call mpi_Isend(proc_max(1,1), gp_s(1)*gp_s(2), MPI_INTEGER, neighbors(direction,2), tag_table(2), &
+         & D_comm(direction), send_request_gh(2), ierr)
+    ! Receive it
+    call mpi_recv(proc_min_next(1,1), gp_s(1)*gp_s(2), MPI_INTEGER, neighbors(direction,2), tag_table(1), & 
+         & D_comm(direction), statut, ierr)
+    call mpi_recv(proc_max_prev(1,1), gp_s(1)*gp_s(2), MPI_INTEGER, neighbors(direction,1), tag_table(2), &
+         & D_comm(direction), statut, ierr)
+
+    ! -- Determine if I am the first or the last to send information to a given
+    ! processus and sort lines by target processes for which I am the first and
+    ! for which I am the last. --
+    tag_table = compute_tag(ind_group, tag_obtsend_NP, direction)
+    allocate(first(2*(gp_s(1)*gp_s(2)+1),proc_min_abs:proc_max_abs))
+    first(2,:) = 2
+    allocate(last(2*(gp_s(1)*gp_s(2)+1),proc_min_abs:proc_max_abs))
+    last(2,:) = 2
+    do proc_gap = proc_min_abs, proc_max_abs
+       first(1,proc_gap) = -proc_gap
+       last(1,proc_gap) = -proc_gap
+    end do
+    do ind2 = 1, gp_s(2)
+       do ind1 = 1, gp_s(1)
+          ! Compute if I am the first, ie if:
+          ! a - proc_min <= proc_gap <= proc_max,
+          ! b - proc_gap > proc_max_prev -1.
+          do proc_gap = max(proc_min(ind1,ind2), proc_max_prev(ind1,ind2)), proc_max(ind1,ind2)
+             first(first(2,proc_gap)+1,proc_gap) = ind1
+             first(first(2,proc_gap)+2,proc_gap) = ind2
+             first(2 ,proc_gap) = first(2 ,proc_gap) + 2
+          end do
+          ! Compute if I am the last, ie if:
+          ! a - proc_min <= proc_gap <= proc_max,
+          ! b - proc_gap < proc_min_next+1.
+          do proc_gap = proc_min(ind1,ind2), min(proc_min_next(ind1,ind2), proc_max(ind1,ind2))
+             last(last(2,proc_gap)+1,proc_gap) = ind1
+             last(last(2,proc_gap)+2,proc_gap) = ind2
+             last(2 ,proc_gap) = last(2 ,proc_gap) + 2
+          end do
+       end do
+    end do
+
+
+    ! -- Send information if I am the first or the last --
+    do proc_gap = proc_min_abs, proc_max_abs
+       ! Am I the first?
+       if (first(2,proc_gap)>2) then
+          ! Compute the rank of the target processus
+          call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rankN, ierr)
+          if(rankN /= D_rank(direction)) then
+             call mpi_Isend(first(1,proc_gap), first(2,proc_gap), MPI_INTEGER, rankN, tag_table(1), D_comm(direction), &
+                  & send_request(proc_gap,1), ierr)
+             send_request(proc_gap,3) = 1
+          else
+             do nb = 3, first(2,proc_gap), 2
+                rece_proc(1,first(nb,proc_gap),first(nb+1,proc_gap)) = -proc_gap
+             end do
+          end if
+       end if
+       ! Am I the last?
+       if (last(2,proc_gap)>2) then
+          ! Compute the rank of the target processus
+          call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rankN, ierr)
+          if(rankN /= D_rank(direction)) then
+             call mpi_Isend(last(1,proc_gap), last(2,proc_gap), MPI_INTEGER, rankN, tag_table(2), D_comm(direction), &
+                  & send_request(proc_gap,2), ierr)
+             send_request(proc_gap,3) = send_request(proc_gap, 3) + 2
+          else
+             do nb = 3, last(2,proc_gap), 2
+                rece_proc(2,last(nb,proc_gap),last(nb+1,proc_gap)) = -proc_gap
+             end do
+          end if
+       end if
+    end do
+
+    ! -- Receive it --
+    do while(any(rece_proc(1,:,:) == 3*N(direction)))
+       rece_buffer(2) = 2
+       call mpi_recv(rece_buffer(1), size(rece_buffer), MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
+       do nb = 3, rece_buffer(2), 2
+          rece_proc(1,rece_buffer(nb),rece_buffer(nb+1)) = rece_buffer(1)
+       end do
+    end do
+    do while(any(rece_proc(2,:,:) == 3*N(direction)))
+       rece_buffer(2) = 2
+       call mpi_recv(rece_buffer(1),size(rece_buffer), MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
+       do nb = 3, rece_buffer(2), 2
+          rece_proc(2,rece_buffer(nb),rece_buffer(nb+1)) = rece_buffer(1)
+       end do
+    end do
+
+    ! -- Free Isend buffer --
+    call mpi_wait(send_request_gh(1), statut, ierr)
+    call mpi_wait(send_request_gh(2), statut, ierr)
+    do proc_gap = proc_min_abs, proc_max_abs
+       select case (send_request(proc_gap,3))
+       case (3)
+          call mpi_wait(send_request(proc_gap,1), statut, ierr)
+          call mpi_wait(send_request(proc_gap,2), statut, ierr)
+       case (2)
+          call mpi_wait(send_request(proc_gap,2), statut, ierr)
+       case (1)
+          call mpi_wait(send_request(proc_gap,1), statut, ierr)
+       end select
+    end do
+
+  end subroutine AC_obtain_senders_com
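+
+  ! Example (assumed values, for illustration): with N_proc(direction) = 32 and a
+  ! line whose particles span send_group_min = -5 to send_group_max = 70, the
+  ! formulas above give proc_min = floor(-6/32.) = -1 and proc_max = floor(69/32.) = 2,
+  ! i.e. this line is remeshed onto the processes located from one rank before me
+  ! up to two ranks after me along the current direction.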
+
+  ! ===================================================================
+  ! ====================     Remesh particles      ====================
+  ! ===================================================================
+
+  !> Remesh a particle line with the corrected lambda 2 formula - remeshing is done
+  !! into a real array
+  !!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+  !!    @param[in]        p_pos_adim  = dimensionless particle positions
+  !!    @param[in]        scal1D      = scalar field to advect
+  !!    @param[in]        bl_type     = equal 0 (resp. 1) if the block is left (resp. centered)
+  !!    @param[in]        bl_tag      = contains information about the block (is it tagged?)
+  !!    @param[in]        ind_min     = minimal index of the send buffer
+  !!    @param[in]        ind_max     = maximal index of the send buffer
+  !!    @param[in, out]   send_buffer = buffer used to remesh the scalar before sending it to the right subdomain
+  !! @details
+  !!     Use the corrected lambda 2 remeshing formula.
+  !! This remeshing formula depends on the particle type:
+  !!     1 - Is the particle tagged?
+  !!     2 - Does it belong to a centered or a left block?
+  !! Observe that tagged particles go by groups of two: if the last particle of a
+  !! block is tagged, then the first particle of the following block is
+  !! tagged too.
+  !! The following algorithm is written for blocks of minimal size.
+  !! @author = Jean-Baptiste Lagaert, LEGI/Ljk
+  subroutine AC_remesh_lambda2corrected_array(direction,p_pos_adim,scal1D,bl_type,bl_tag,ind_min,ind_max,send_buffer)
+
+    ! Input/Output
+    integer, intent(in)                                 :: direction
+    real(WP), dimension(:), intent(in)                  :: p_pos_adim
+    real(WP), dimension(:), intent(in)  :: scal1D
+    logical, dimension(:), intent(in)                   :: bl_type
+    logical, dimension(:), intent(in)                   :: bl_tag
+    integer, intent(in)                                 :: ind_min, ind_max
+    real(WP), dimension(ind_min:ind_max), intent(inout) :: send_buffer
+    ! Other local variables
+    integer     :: bl_ind                               ! indice of the current "block end".
+    integer     :: p_ind                                ! indice of the current particle
+
+    send_j_min = ind_min
+    send_j_max = ind_max
+
+    do p_ind = 1, N_proc(direction), bl_size
+       bl_ind = p_ind/bl_size + 1
+       if (bl_tag(bl_ind)) then
+          ! Tag case
+          ! XXX Debug: to activate only for debugging purposes
+          !if (bl_type(ind).neqv. (.not. bl_type(ind+1))) then
+          !    write(*,'(a,x,3(L1,x),a,3(i0,a))'), 'error on remeshing particles: (tag,type(i), type(i+1)) =', &
+          !    & bl_tag(ind), bl_type(ind), bl_type(ind+1), ' and type must be different. Mesh point = (',i, ', ', j,', ',k,')'
+          !    write(*,'(a,x,i0)'),  'block parameters: ind =', bl_ind
+          !    stop
+          !end if
+          ! XXX Debug - end
+          if (bl_type(bl_ind)) then
+             ! tagged, the first particle belong to a centered block and the last to left block.
+             call AC_remesh_tag_CL(p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), send_buffer)
+          else
+             ! tagged, the first particle belong to a left block and the last to centered block.
+             call AC_remesh_tag_LC(p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), send_buffer)
+          end if
+       else
+          if (bl_type(bl_ind)) then
+             ! First particle is remeshed with center formula
+             call AC_remesh_center(p_pos_adim(p_ind),scal1D(p_ind), send_buffer)
+          else
+             ! First particle is remeshed with left formula
+             call AC_remesh_left(p_pos_adim(p_ind),scal1D(p_ind), send_buffer)
+          end if
+          if (bl_type(bl_ind+1)) then
+             ! Second particle is remeshed with center formula
+             call AC_remesh_center(p_pos_adim(p_ind+1),scal1D(p_ind+1), send_buffer)
+          else
+             ! Second particle is remeshed with left formula
+             call AC_remesh_left(p_pos_adim(p_ind+1),scal1D(p_ind+1), send_buffer)
+          end if
+       end if
+    end do
+
+  end subroutine AC_remesh_lambda2corrected_array
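+
+  ! Note on the loop above (assuming bl_size = 2 for the lambda 2 scheme, as
+  ! implied by the two particles handled per iteration): p_ind takes the values
+  ! 1, 3, 5, ... and bl_ind = p_ind/bl_size + 1 gives 1, 2, 3, ..., so each
+  ! iteration remeshes the two particles p_ind and p_ind+1 of block bl_ind.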
+
+
+  !> Remesh a particle line with the corrected lambda 2 formula - remeshing is done
+  !! into an array of pointers to real
+  !!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+  !!    @param[in]        p_pos_adim  = dimensionless particle positions
+  !!    @param[in]        scal1D      = scalar field to advect
+  !!    @param[in]        bl_type     = equal 0 (resp. 1) if the block is left (resp. centered)
+  !!    @param[in]        bl_tag      = contains information about the block (is it tagged?)
+  !!    @param[in]        ind_min     = minimal index of the send buffer
+  !!    @param[in]        ind_max     = maximal index of the send buffer
+  !!    @param[in, out]   send_buffer = array of pointers to the buffer used to remesh the scalar before sending it to the right subdomain
+  !! @details
+  !!     Use the corrected lambda 2 remeshing formula.
+  !! This remeshing formula depends on the particle type:
+  !!     1 - Is the particle tagged?
+  !!     2 - Does it belong to a centered or a left block?
+  !! Observe that tagged particles go by groups of two: if the last particle of a
+  !! block is tagged, then the first particle of the following block is
+  !! tagged too.
+  !! The following algorithm is written for blocks of minimal size.
+  !! @author = Jean-Baptiste Lagaert, LEGI/Ljk
+  subroutine AC_remesh_lambda2corrected_pter(direction,p_pos_adim,scal1D,bl_type,bl_tag,ind_min,ind_max,send_buffer)
+
+    ! Input/Output
+    integer, intent(in)                                         :: direction
+    real(WP), dimension(:), intent(in)                          :: p_pos_adim
+    real(WP), dimension(:), intent(in)          :: scal1D
+    logical, dimension(:), intent(in)                           :: bl_type
+    logical, dimension(:), intent(in)                           :: bl_tag
+    integer, intent(in)                                         :: ind_min, ind_max
+    type(real_pter), dimension(ind_min:ind_max), intent(inout)  :: send_buffer
+    ! Other local variables
+    integer     :: bl_ind                               ! indice of the current "block end".
+    integer     :: p_ind                                ! indice of the current particle
+
+    send_j_min = ind_min
+    send_j_max = ind_max
+
+    do p_ind = 1, N_proc(direction), bl_size
+       bl_ind = p_ind/bl_size + 1
+       if (bl_tag(bl_ind)) then
+          ! Tag case
+          ! XXX Debug: to activate only for debugging purposes
+          !if (bl_type(ind).neqv. (.not. bl_type(ind+1))) then
+          !    write(*,'(a,x,3(L1,x),a,3(i0,a))'), 'error on remeshing particles: (tag,type(i), type(i+1)) =', &
+          !    & bl_tag(ind), bl_type(ind), bl_type(ind+1), ' and type must be different. Mesh point = (',i, ', ', j,', ',k,')'
+          !    write(*,'(a,x,i0)'),  'block parameters: ind =', bl_ind
+          !    stop
+          !end if
+          ! XXX Debug - end
+          if (bl_type(bl_ind)) then
+             ! tagged, the first particle belong to a centered block and the last to left block.
+             !!call AC_remesh_tag_CL(p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), send_buffer)
+          else
+             ! tagged, the first particle belong to a left block and the last to centered block.
+             !!call AC_remesh_tag_LC(p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), send_buffer)
+          end if
+       else
+          if (bl_type(bl_ind)) then
+             ! First particle is remeshed with center formula
+             !!call AC_remesh_center(p_pos_adim(p_ind),scal1D(p_ind), send_buffer)
+          else
+             ! First particle is remeshed with left formula
+             !!call AC_remesh_left(p_pos_adim(p_ind),scal1D(p_ind), send_buffer)
+          end if
+          if (bl_type(bl_ind+1)) then
+             ! Second particle is remeshed with center formula
+             !!call AC_remesh_center(p_pos_adim(p_ind+1),scal1D(p_ind+1), send_buffer)
+          else
+             ! Second particle is remeshed with left formula
+             !!call AC_remesh_left(p_pos_adim(p_ind+1),scal1D(p_ind+1), send_buffer)
+          end if
+       end if
+    end do
+
+  end subroutine AC_remesh_lambda2corrected_pter
+
+
+  !> Remesh a particle line with the corrected lambda 4 formula
+  !!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+  !!    @param[in]        p_pos_adim  = dimensionless particle positions
+  !!    @param[in]        scal1D      = scalar field to advect
+  !!    @param[in]        bl_type     = equal 0 (resp. 1) if the block is left (resp. centered)
+  !!    @param[in]        bl_tag      = contains information about the block (is it tagged?)
+  !!    @param[in]        ind_min     = minimal index of the send buffer
+  !!    @param[in]        ind_max     = maximal index of the send buffer
+  !!    @param[in, out]   send_buffer = buffer used to remesh the scalar before sending it to the right subdomain
+  !! @details
+  !!     Use the corrected lambda 4 remeshing formula.
+  !! This remeshing formula depends on the particle type:
+  !!     1 - Is the particle tagged?
+  !!     2 - Does it belong to a centered or a left block?
+  !! Observe that tagged particles go by groups of two: if the last particle of a
+  !! block is tagged, then the first particle of the following block is
+  !! tagged too.
+  !! The following algorithm is written for blocks of minimal size.
+  !! @author = Jean-Baptiste Lagaert, LEGI/Ljk
+  subroutine AC_remesh_lambda4corrected(direction, p_pos_adim, scal1D, bl_type, bl_tag, ind_min, ind_max, send_buffer)
+
+    ! Input/Output
+    integer, intent(in)                                 :: direction
+    real(WP), dimension(:), intent(in)                  :: p_pos_adim
+    real(WP), dimension(:), intent(in)  :: scal1D
+    logical, dimension(:), intent(in)                   :: bl_type
+    logical, dimension(:), intent(in)                   :: bl_tag
+    integer, intent(in)                                 :: ind_min, ind_max
+    real(WP), dimension(ind_min:ind_max), intent(inout) :: send_buffer
+    ! Other local variables
+    integer     :: bl_ind                               ! indice of the current "block end".
+    integer     :: p_ind                                ! indice of the current particle
+
+    send_j_min = ind_min
+    send_j_max = ind_max
+
+    do p_ind = 1, N_proc(direction), bl_size
+       bl_ind = p_ind/bl_size + 1
+       if (bl_tag(bl_ind)) then 
+          ! Tagged case
+          if (bl_type(bl_ind)) then
+             ! tagged, the first particle belong to a centered block and the last to left block.
+             call AC_remesh_O4_tag_CL(p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), &
+                  & p_pos_adim(p_ind+2), scal1D(p_ind+2), p_pos_adim(p_ind+3), scal1D(p_ind+3), send_buffer)
+          else
+             ! tagged, the first particle belong to a left block and the last to centered block.
+             call AC_remesh_O4_tag_LC(p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), &
+                  & p_pos_adim(p_ind+2), scal1D(p_ind+2), p_pos_adim(p_ind+3), scal1D(p_ind+3), send_buffer)
+          end if
+       else
+          ! No tag
+          if (bl_type(bl_ind)) then 
+             call AC_remesh_O4_center(p_pos_adim(p_ind),scal1D(p_ind), send_buffer)
+             call AC_remesh_O4_center(p_pos_adim(p_ind+1),scal1D(p_ind+1), send_buffer)
+          else
+             call AC_remesh_O4_left(p_pos_adim(p_ind),scal1D(p_ind), send_buffer)
+             call AC_remesh_O4_left(p_pos_adim(p_ind+1),scal1D(p_ind+1), send_buffer)
+          end if
+          if (bl_type(bl_ind+1)) then 
+             call AC_remesh_O4_center(p_pos_adim(p_ind+2),scal1D(p_ind+2), send_buffer)
+             call AC_remesh_O4_center(p_pos_adim(p_ind+3),scal1D(p_ind+3), send_buffer)
+          else
+             call AC_remesh_O4_left(p_pos_adim(p_ind+2),scal1D(p_ind+2), send_buffer)
+             call AC_remesh_O4_left(p_pos_adim(p_ind+3),scal1D(p_ind+3), send_buffer)
+          end if
+       end if
+    end do
+
+  end subroutine AC_remesh_lambda4corrected
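+
+  ! Note on the loop above (assuming bl_size = 4 for the lambda 4 scheme, as
+  ! implied by the four particles handled per iteration): p_ind takes the values
+  ! 1, 5, 9, ... and bl_ind = p_ind/bl_size + 1 gives 1, 2, 3, ..., so each
+  ! iteration remeshes the four particles p_ind to p_ind+3 of block bl_ind.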
+
+  !> Left remeshing formula of order 2
+  !!      @param[in]       pos_adim= dimensionless particle position
+  !!      @param[in]       sca     = scalar advected by the particle
+  !!      @param[in,out]   buffer  = temporarily remeshed scalar field
+  subroutine AC_remesh_left(pos_adim, sca, buffer)
+
+    ! Input/Output
+    real(WP), intent(in)                                        :: pos_adim, sca
+    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: j0                       ! indice of the nearest mesh points
+    real(WP)    :: bM, b0, bP               ! interpolation weight for the particles
+    real(WP)    :: y0                       ! adimensionned distance to mesh points
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+    !j0 = floor(pos/d_sc(2))
+
+    ! Distance to mesh points
+    y0 = (pos_adim - float(j0))
+
+    ! Interpolation weights
+    bM=0.5*y0*(y0-1.)
+    b0=1.-y0**2
+    !bP=0.5*y0*(y0+1.)
+    bP=1. - (b0+bM)
+
+    ! remeshing
+    buffer(j0-1) = buffer(j0-1)   + bM*sca
+    buffer(j0)   = buffer(j0)     + b0*sca
+    buffer(j0+1) = buffer(j0+1)   + bP*sca
+
+  end subroutine AC_remesh_left
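+
+  ! Worked example for AC_remesh_left (illustrative values): with pos_adim = 4.3,
+  ! j0 = floor(4.3) = 4 and y0 = 0.3, so bM = 0.5*0.3*(0.3-1.) = -0.105,
+  ! b0 = 1. - 0.3**2 = 0.91 and bP = 1. - (b0+bM) = 0.195. The weights sum to 1,
+  ! so the scalar carried by the particle is entirely deposited on points 3 to 5.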
+
+  !> Centered remeshing formula of order 2
+  !!      @param[in]       pos_adim= dimensionless particle position
+  !!      @param[in]       sca     = scalar advected by the particle
+  !!      @param[in,out]   buffer  = temporarily remeshed scalar field
+  subroutine AC_remesh_center(pos_adim, sca, buffer)
+
+    ! Input/output
+    real(WP), intent(in)                                        :: pos_adim, sca
+    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: j0                       ! indice of the nearest mesh points
+    real(WP)    :: bM, b0, bP               ! interpolation weight for the particles
+    real(WP)    :: y0             ! adimensionned distance to mesh points
+
+    j0 = nint(pos_adim)
+    !j0 = nint(pos/d_sc(2))
+
+    ! Distance to mesh points
+    y0 = (pos_adim - float(j0))
+    !y0 = (pos - float(j0)*d_sc(2))/d_sc(2)
+
+    ! Interpolation weights
+    bM=0.5*y0*(y0-1.)
+    b0=1.-y0**2
+    !bP=0.5*y0*(y0+1.)
+    bP=1. -b0 - bM
+
+    ! remeshing
+    buffer(j0-1) = buffer(j0-1)   + bM*sca
+    buffer(j0)   = buffer(j0)     + b0*sca
+    buffer(j0+1) = buffer(j0+1)   + bP*sca
+
+  end subroutine AC_remesh_center
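+
+  ! The centered formula only differs from the left one by the choice of the
+  ! reference point: nint instead of floor. For example (illustrative value),
+  ! pos_adim = 4.7 gives j0 = 5 and y0 = -0.3 here (stencil 4:6), whereas the
+  ! left formula would use j0 = 4 and y0 = 0.7 (stencil 3:5).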
+
+
+  !> Corrected remeshing formula for the transition from a Centered block to a Left block with a different index (tagged particles)
+  !!    @param[in]       pos_adim= dimensionless particle position
+  !!    @param[in]       sca     = scalar advected by this particle
+  !!    @param[in]       posP_ad = dimensionless position of the second particle
+  !!    @param[in]       scaP    = scalar advected by this particle
+  !!    @param[in,out]   buffer  = temporarily remeshed scalar field
+  !! @details
+  !!    Remeshing formula devoted to tagged particles.
+  !!    The particle group passed as argument is composed of the end of a block and of the
+  !!    beginning of the next block. The first particle belongs to a centered block
+  !!    and the last one to a left block. The blocks have different indices (tagged
+  !!    particles) so we have to use the corrected formula.
+  subroutine AC_remesh_tag_CL(pos_adim, sca, posP_ad, scaP, buffer)
+
+    ! Input/Output
+    real(WP), intent(in)                                        :: pos_adim, sca, posP_ad, scaP
+    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: jM, j0, jP               ! indice of the nearest mesh points
+    ! (they depend on the block type)
+    integer     :: j0_bis                   ! indice of the nearest mesh point for the indP=ind+1 particle
+    real(WP)    :: aM, a0, bP, b0           ! interpolation weight for the particles
+    real(WP)    :: y0, y0_bis               ! adimensionned distance to mesh points
+
+    j0 = nint(pos_adim)
+    !j0 = nint(pos/d_sc(2))
+    j0_bis = floor(posP_ad)
+    !j0_bis = floor(posP/d_sc(2))
+    jM=j0-1
+    jP=j0+1
+
+    y0 = (pos_adim - float(j0))
+    !y0 = (pos - float(j0)*d_sc(2))/d_sc(2)
+    y0_bis = (posP_ad - float(j0_bis))
+    !y0_bis = (posP - float(j0_bis)*d_sc(2))/d_sc(2)
+
+    aM=0.5*y0*(y0-1)
+    a0=1.-aM
+    bP=0.5*y0_bis*(y0_bis+1.)
+    b0=1.-bP
+
+    ! Remeshing
+    buffer(jM)=buffer(jM)+aM*sca
+    buffer(j0)=buffer(j0)+a0*sca+b0*scaP
+    buffer(jP)=buffer(jP)+bP*scaP
+
+  end subroutine AC_remesh_tag_CL
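+
+  ! Sanity check for the corrected CL formula above: since a0 = 1 - aM and
+  ! b0 = 1 - bP, each of the two particles deposits a total weight of one, so
+  ! the scalar quantity sca + scaP is conserved; the two stencils only overlap
+  ! at the shared point j0.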
+
+
+  !> Corrected remeshing formula for the transition from a Left block to a Centered block with a different index (tagged particles)
+  !!    @param[in]       pos_adim= dimensionless particle position
+  !!    @param[in]       sca     = scalar advected by this particle
+  !!    @param[in]       posP_ad = dimensionless position of the second particle
+  !!    @param[in]       scaP    = scalar advected by this particle
+  !!    @param[in,out]   buffer  = temporarily remeshed scalar field
+  !! @details
+  !!    Remeshing formula devoted to tagged particles.
+  !!    The particle group passed as argument is composed of the end of a block and of the
+  !!    beginning of the next block. The first particle belongs to a left block
+  !!    and the last one to a centered block. The blocks have different indices (tagged
+  !!    particles) so we have to use the corrected formula.
+  subroutine AC_remesh_tag_LC(pos_adim, sca, posP_ad, scaP, buffer)
+
+    ! Input/Output
+    real(WP), intent(in)                                        :: pos_adim, sca, posP_ad, scaP
+    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: jM, j0, jP, jP2, jP3             ! indice of the nearest mesh points
+    ! (they depend on the block type)
+    integer     :: j0_bis                           ! indice of the nearest mesh point for the indP=ind+1 particle
+    real(WP)    :: aM, a0, aP,aP2, b0, bP, bP2, bP3 ! interpolation weight for the particles
+    real(WP)    :: y0,yM_bis,y0_bis,yP_bis   ! adimensionned distance to mesh points
+    
+
+    ! Indice of mesh point used in order to remesh
+    j0 = floor(pos_adim)
+    !j0 = floor(pos/d_sc(2))
+    j0_bis = nint(posP_ad)
+    !j0_bis = nint(posP/d_sc(2))
+    jM=j0-1
+    jP=j0+1
+    jP2=j0+2
+    jP3=j0+3
+
+    ! Distance to mesh point
+    y0 = (pos_adim - float(j0))
+    !y0 = (pos - float(j0)*d_sc(2))/d_sc(2)
+    y0_bis = (posP_ad - float(j0_bis))
+    !y0_bis = (posP - float(j0_bis)*d_sc(2))/d_sc(2)
+    yP_bis=y0_bis+1
+    yM_bis=y0_bis-1
+
+    ! Interpolation weight
+    a0=1-y0**2
+    aP=y0
+    !aM=y0*yM/2.
+    aM = 0.5-(a0+aP)/2.
+    aP2=aM
+    bP=-y0_bis
+    bP2=1-y0_bis**2
+    !b0=y0_bis*yP_bis/2.
+    b0 = 0.5-(bP+bP2)/2.
+    bP3=b0
+
+    ! Remeshing
+    buffer(jM)= buffer(jM)+aM*sca
+    buffer(j0)= buffer(j0)+a0*sca+b0*scaP
+    buffer(jP)= buffer(jP)+aP*sca+bP*scaP
+    buffer(jP2)=buffer(jP2)+aP2*sca+bP2*scaP
+    buffer(jP3)=buffer(jP3)+bP3*scaP
+
+  end subroutine AC_remesh_tag_LC
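+
+  ! Sanity check for the corrected LC formula above: aP2 = aM and
+  ! aM = 0.5 - (a0+aP)/2, hence aM + a0 + aP + aP2 = 1; likewise bP3 = b0 and
+  ! b0 = 0.5 - (bP+bP2)/2, hence b0 + bP + bP2 + bP3 = 1. Each particle therefore
+  ! deposits a total weight of one, spread over the points jM to jP3.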
+
+
+  !> Left remeshing formula of order 4
+  !!      @param[in]       pos_adim= dimensionless particle position
+  !!      @param[in]       sca     = scalar advected by the particle
+  !!      @param[in,out]   buffer  = temporarily remeshed scalar field
+  subroutine AC_remesh_O4_left(pos_adim, sca, buffer)
+
+    ! Input/Output
+    real(WP), intent(in)                                        :: pos_adim, sca
+    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: j0                       ! indice of the nearest mesh points
+    real(WP)    :: bM2, bM, b0, bP, bP2     ! interpolation weight for the particles
+    real(WP)    :: y0               ! adimensionned distance to mesh points
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+    !j0 = floor(pos/d_sc(2))
+
+    ! Distance to mesh points
+    y0 = (pos_adim - float(j0))
+
+    ! Interpolation weights
+    !bM2=(y0-2.)*(y0-1.)*y0*(y0+1.)/24.0
+    bM2=y0*(2.+y0*(-1.+y0*(-2.+y0)))/24.0
+    !bM =(2.-y0)*(y0-1.)*y0*(y0+2.)/6.0
+    bM =y0*(-4.+y0*(4.+y0*(1.-y0)))/6.0
+    !bP =(2.-y0)*y0*(y0+1.)*(y0+2.)/6.0
+    bP =y0*(4+y0*(4-y0*(1.+y0)))/6.0
+    !bP2=(y0-1.)*y0*(y0+1.)*(y0+2.)/24.0
+    bP2=y0*(-2.+y0*(-1.+y0*(2.+y0)))/24.0
+    !b0 =(y0-2.)*(y0-1.)*(y0+1.)*(y0+2.)/4.0
+    b0 = 1. -(bM2+bM+bP+bP2)
+
+    ! remeshing
+    buffer(j0-2) = buffer(j0-2)   + bM2*sca
+    buffer(j0-1) = buffer(j0-1)   + bM*sca
+    buffer(j0)   = buffer(j0)     + b0*sca
+    buffer(j0+1) = buffer(j0+1)   + bP*sca
+    buffer(j0+2) = buffer(j0+2)   + bP2*sca
+
+  end subroutine AC_remesh_O4_left
+
+  !> Centered remeshing formula of order 4
+  !!      @param[in]       pos_adim= dimensionless particle position
+  !!      @param[in]       sca     = scalar advected by the particle
+  !!      @param[in,out]   buffer  = temporarily remeshed scalar field
+  subroutine AC_remesh_O4_center(pos_adim, sca, buffer)
+
+    ! Input/output
+    real(WP), intent(in)                                        :: pos_adim, sca
+    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: j0                       ! indice of the nearest mesh points
+    real(WP)    :: bM2, bM, b0, bP, bP2     ! interpolation weight for the particles
+    real(WP)    :: y0             ! adimensionned distance to mesh points
+    ! Mesh point used in remeshing formula
+    j0 = nint(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - float(j0))
+
+    ! Interpolation weights
+    !bM2=(y0-2.)*(y0-1.)*y0*(y0+1.)/24.0
+    bM2=y0*(2.+y0*(-1.+y0*(-2.+y0)))/24.0
+    !bM =(2.-y0)*(y0-1.)*y0*(y0+2.)/6.0
+    bM =y0*(-4.+y0*(4.+y0*(1.-y0)))/6.0
+    !bP =(2.-y0)*y0*(y0+1.)*(y0+2.)/6.0
+    bP =y0*(4+y0*(4-y0*(1.+y0)))/6.0
+    !bP2=(y0-1.)*y0*(y0+1.)*(y0+2.)/24.0
+    bP2=y0*(-2.+y0*(-1.+y0*(2.+y0)))/24.0
+    !b0 =(y0-2.)*(y0-1.)*(y0+1.)*(y0+2.)/4.0
+    b0 = 1. -(bM2+bM+bP+bP2)
+
+    ! remeshing
+    buffer(j0-2) = buffer(j0-2)   + bM2*sca
+    buffer(j0-1) = buffer(j0-1)   + bM*sca
+    buffer(j0)   = buffer(j0)     + b0*sca
+    buffer(j0+1) = buffer(j0+1)   + bP*sca
+    buffer(j0+2) = buffer(j0+2)   + bP2*sca
+
+  end subroutine AC_remesh_O4_center
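+
+  ! Quick check for the two order-4 formulas above (O4_left and O4_center):
+  ! every weight except b0 carries a factor y0, and b0 = 1 - (bM2+bM+bP+bP2),
+  ! so a particle sitting exactly on a mesh point (y0 = 0) deposits its whole
+  ! scalar on j0, and the five weights always sum to one.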
+
+
+  !> Order 4 corrected remeshing formula for the transition from a Centered block to a Left block with a different index (tagged particles)
+  !!    @param[in]       posM_ad = dimensionless position of the first particle
+  !!    @param[in]       scaM    = scalar advected by the first particle
+  !!    @param[in]       pos_adim= dimensionless position of the second particle (the last of the first block)
+  !!    @param[in]       sca     = scalar advected by this particle
+  !!    @param[in]       posP_ad = dimensionless position of the third particle (which is the first of the second block)
+  !!    @param[in]       scaP    = scalar advected by this particle
+  !!    @param[in]       posP2_ad= dimensionless position of the fourth (and last) particle
+  !!    @param[in]       scaP2   = scalar advected by this particle
+  !!    @param[in,out]   buffer  = temporarily remeshed scalar field
+  !! @details
+  !!    Remeshing formula devoted to tagged particles.
+  !!    The particle group passed as argument is composed of the end of a block and of the
+  !!    beginning of the next block. The first particles belong to a centered block
+  !!    and the last ones to a left block. The blocks have different indices (tagged
+  !!    particles) so we have to use the corrected formula.
+  subroutine AC_remesh_O4_tag_CL(posM_ad, scaM, pos_adim, sca, posP_ad, scaP, posP2_ad, scaP2, buffer)
+
+    ! Input/Output
+    real(WP), intent(in)                                        :: pos_adim, sca, posP_ad, scaP
+    real(WP), intent(in)                                        :: posM_ad, scaM, posP2_ad, scaP2
+    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: jM, j0, jP, jP2          ! indice of the nearest mesh points
+    ! (they depend on the block type)
+    real(WP)    :: aM3, aM2, aM, a0         ! interpolation weight for the particles
+    real(WP)    :: bM2, bM, b0, bP          ! interpolation weight for the particles
+    real(WP)    :: cM, c0, cP, cP2          ! interpolation weight for the particles
+    real(WP)    :: e0, eP, eP2, eP3         ! interpolation weight for the particles
+    real(WP)    :: yM, y0, yP, yP2          ! adimensionned distance to mesh points for each particles
+
+    ! Indice of mesh point used in order to remesh
+    jM = nint(posM_ad)
+    j0 = nint(pos_adim)
+    jP = floor(posP_ad)
+    jP2= floor(posP2_ad)
+
+    ! Distance to mesh point
+    yM = (posM_ad  - float(jM))
+    y0 = (pos_adim - float(j0))
+    yP = (posP_ad  - float(jP))
+    yP2= (posP2_ad - float(jP2))
+
+    ! Interpolation weights
+    !aM3=(yM-2.)*(yM-1.)*yM*(yM+1.)/24.0
+    aM3=yM*(2.+yM*(-1.+yM*(-2.+yM)))/24.0
+    !aM2=(2.-yM)*(yM-1.)*yM*(yM+2.)/6.0
+    aM2=yM*(-4.+yM*(4.+yM*(1.-yM)))/6.0
+    !aM =(yM-2.)*(yM-1.)*(yM+1.)*(yM+2.)/4.0
+    aM =(4.+(yM**2)*(-5.+yM**2))/4.0
+    !a0 =((2.-yM)*yM*(yM+1.)*(yM+2.)/6.0) + ((yM-1.)*yM*(yM+1.)*(yM+2.)/24.0)
+    a0 = 1. - (aM3+aM2+aM)
+
+    !bM2=(y0-2.)*(y0-1.)*y0*(y0+1.)/24.0
+    bM2=y0*(2.+y0*(-1.+y0*(-2.+y0)))/24.0
+    !bM =(2.-y0)*(y0-1.)*y0*(y0+2.)/6.0
+    bM =y0*(-4.+y0*(4.+y0*(1.-y0)))/6.0
+    !bP =((y0+1)-1.)*(y0+1)*((y0+1)+1.)*((y0+1)+2.)/24.0 
+    bP =y0*(6.+y0*(11+y0*(6+y0)))/24.0 
+    !b0 =((y0-2.)*(y0-1.)*(y0+1.)*(y0+2.)/4.0) + ((2.-y0)*y0*(y0+1.)*(y0+2.)/6.0) &
+    !        & + ((y0-1.)*y0*(y0+1.)*(y0+2.)/24.0) - bP
+    b0 = 1. - (bM2+bM+bP)
+
+    !cM =((yP-1.)-2.)*((yP-1.)-1.)*(yP-1.)*((yP-1.)+1.)/24.0
+    cM =yP*(-6.+yP*(11.+yP*(-6.+yP)))/24.0
+    !cP =(2.-yP)*yP*(yP+1.)*(yP+2.)/6.0
+    cP =yP*(4.+yP*(4.-yP*(1.+yP)))/6.0
+    !cP2=(yP-1.)*yP*(yP+1.)*(yP+2.)/24.0
+    cP2=yP*(-2.+yP*(-1.+yP*(2.+yP)))/24.0
+    !c0 =((yP-2.)*(yP-1.)*yP*(yP+1.)/24.0)+((2.-yP)*(yP-1.)*yP*(yP+2.)/6.0) &
+    !        & + ((yP-2.)*(yP-1.)*(yP+1.)*(yP+2.)/4.0) - cM
+    c0 = 1. - (cM+cP+cP2)
+
+    !eP =(yP2-2.)*(yP2-1.)*(yP2+1.)*(yP2+2.)/4.0
+    eP =1.+((yP2**2)*(-5+yP2**2)/4.0)
+    !eP2=(2.-yP2)*yP2*(yP2+1.)*(yP2+2.)/6.0
+    eP2=yP2*(4.+yP2*(4.-yP2*(1+yP2)))/6.0
+    !eP3=(yP2-1.)*yP2*(yP2+1.)*(yP2+2.)/24.0
+    eP3=yP2*(-2.+yP2*(-1.+yP2*(2+yP2)))/24.0
+    !e0 =((yP2-2.)*(yP2-1.)*yP2*(yP2+1.)/24.0) + ((2.-yP2)*(yP2-1.)*yP2*(yP2+2.)/6.0)
+    e0 = 1. - (eP+eP2+eP3)
+
+    ! remeshing
+    buffer(j0-3) = buffer(j0-3)   +aM3*scaM 
+    buffer(j0-2) = buffer(j0-2)   +aM2*scaM +bM2*sca
+    buffer(j0-1) = buffer(j0-1)   + aM*scaM + bM*sca  + cM*scaP
+    buffer(j0)   = buffer(j0)     + a0*scaM + b0*sca  + c0*scaP + e0*scaP2
+    buffer(j0+1) = buffer(j0+1)             + bP*sca  + cP*scaP + eP*scaP2
+    buffer(j0+2) = buffer(j0+2)                       +cP2*scaP +eP2*scaP2
+    buffer(j0+3) = buffer(j0+3)                                 +eP3*scaP2
+
+  end subroutine AC_remesh_O4_tag_CL
+
+
+  !> Corrected remeshing formula of order 3 for the transition from a Left block to a centered
+  !! block with a different index (tagged particles). Use it for the lambda 4 corrected scheme.
+  !!    @param[in]       posM_ad = dimensionless position of the first particle
+  !!    @param[in]       scaM    = scalar advected by the first particle
+  !!    @param[in]       pos_adim= dimensionless position of the second particle (the last of the first block)
+  !!    @param[in]       sca     = scalar advected by this particle
+  !!    @param[in]       posP_ad = dimensionless position of the third particle (which is the first of the second block)
+  !!    @param[in]       scaP    = scalar advected by this particle
+  !!    @param[in]       posP2_ad= dimensionless position of the fourth (and last) particle
+  !!    @param[in]       scaP2   = scalar advected by this particle
+  !!    @param[in,out]   buffer  = temporarily remeshed scalar field
+  !! @details
+  !!    Remeshing formula devoted to tagged particles.
+  !!    The particle group passed as argument is composed of the end of a block and of the
+  !!    beginning of the next block. The first particles belong to a left block
+  !!    and the last ones to a centered block. The blocks have different indices (tagged
+  !!    particles) so we have to use the corrected formula.
+  subroutine AC_remesh_O4_tag_LC(posM_ad, scaM, pos_adim, sca, posP_ad, scaP, posP2_ad, scaP2, buffer)
+
+    ! Input/Output
+    real(WP), intent(in)                                        :: pos_adim, sca, posP_ad, scaP
+    real(WP), intent(in)                                        :: posM_ad, scaM, posP2_ad, scaP2
+    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: jM, j0, jP, jP2          ! indice of the nearest mesh points
+    ! (they depend on the block type)
+    real(WP)    :: aM3, aM2, aM, a0, aP,aP2 ! interpolation weight for the particles
+    real(WP)    :: bM2, bM, b0, bP, bP2,bP3 ! interpolation weight for the particles
+    real(WP)    :: cM, c0, cP, cP2, cP3,cP4 ! interpolation weight for the particles
+    real(WP)    :: e0, eP, eP2, eP3,eP4,ep5 ! interpolation weight for the particles
+    real(WP)    :: yM, y0, yP, yP2          ! adimensionned distance to mesh points for each particles
+
+
+    ! Indice of mesh point used in order to remesh
+    jM = floor(posM_ad)
+    j0 = floor(pos_adim)
+    jP = nint(posP_ad)
+    jP2= nint(posP2_ad)
+
+    ! Distance to mesh point
+    yM = (posM_ad  - float(jM))
+    y0 = (pos_adim - float(j0))
+    yP = (posP_ad  - float(jP))
+    yP2= (posP2_ad - float(jP2))
+
+    ! Interpolation weights
+    !aM3=(yM-2.)*(yM-1.)*yM*(yM+1.)/24.0
+    aM3=yM*(2.+yM*(-1.+yM*(-2.+yM)))/24.0
+    !aM2=(2.-yM)*(yM-1.)*yM*(yM+2.)/6.0
+    aM2 =yM*(-4.+yM*(4.+yM*(1.-yM)))/6.0
+    !aM =(yM-2.)*(yM-1.)*(yM+1.)*(yM+2.)/4.0
+    aM =(4.+(yM**2)*(-5.+yM**2))/4.0
+    !a0 =((2.-yM)*yM*(yM+1.)*(yM+2.)/6.0) 
+    a0 =yM*(4+yM*(4-yM*(1.+yM)))/6.0
+    !aP2=(((yM-1.)-1.)*(yM-1.)*((yM-1.)+1.)*((yM-1.)+2.)/24.0)
+    !aP2=yM*(yM-2.)*(yM-1.)*(yM+1.)/24.0
+    aP2=aM3
+    !aP =((yM-1.)*yM*(yM+1.)*(yM+2.)/24.0) - aP2
+    !aP = 1.0 - (aM3+aM2+aM+a0+aP2)
+    aP = 1.0 - (2.*aM3+aM2+aM+a0)
+
+    !bM2=(y0-2.)*(y0-1.)*y0*(y0+1.)/24.0
+    bM2=y0*(2.+y0*(-1.+y0*(-2.+y0)))/24.0
+    !bM =(2.-y0)*(y0-1.)*y0*(y0+2.)/6.0
+    bM =y0*(-4.+y0*(4.+y0*(1.-y0)))/6.0
+    !b0 =(y0-2.)*(y0-1.)*(y0+1.)*(y0+2.)/4.0
+    b0 =(4.+(y0**2)*(-5.+y0**2))/4.0
+    !bP2=(2.-(y0-1.))*(y0-1.)*((y0-1.)+1.)*((y0-1.)+2.)/6.0
+    !bP2=y0*(3.-y0)*(y0-1.)*(y0+1.)/6.0
+    bP2=y0*(-3.+y0*(1.+y0*(3.-y0)))/6.0
+    !bP3=((y0-1.)-1.)*(y0-1.)*((y0-1.)+1.)*((y0-1.)+2.)/24.0
+    !bP3=y0*(y0-2.)*(y0-1.)*(y0+1.)/24.0
+    bP3 = bM2
+    !bP =(2.-y0)*y0*(y0+1.)*(y0+2.)/6.0 + ((y0-1.)*y0*(y0+1.)*(y0+2.)/24.0) &
+    !       & - (bP2 + bP3)
+    !bP = 1.0 - (bM2 + bM + b0 + bP2 + bP3)
+    bP = 1.0 - (2*bM2 + bM + b0 + bP2)
+
+    !cM =((yP+1)-2.)*((yP+1)-1.)*(yP+1)*((yP+1)+1.)/24.0
+    cM =(yP-1.)*yP*(yP+1)*(yP+2.)/24.0
+    !cM =yP*(-2.+yP*(-1.+yP*(2.+yP)))/24.0
+    !c0 =(2.-(yP+1))*((yP+1)-1.)*(yP+1)*((yP+1)+2.)/6.0
+    !c0 =(1.-yP)*yP*(yP+1)*(yP+3.)/6.0
+    c0 =yP*(3.+yP*(1.-yP*(3.+yP)))/6.0
+    !cP2=(yP-2.)*(yP-1.)*(yP+1.)*(yP+2.)/4.0
+    cP2=(4.+(yP**2)*(-5.+yP**2))/4.0
+    !cP3=(2.-yP)*yP*(yP+1.)*(yP+2.)/6.0
+    cP3=yP*(4+yP*(4-yP*(1.+yP)))/6.0
+    !cP4=(yP-1.)*yP*(yP+1.)*(yP+2.)/24.0
+    cP4=cM
+    !cP =(yP-2.)*(yP-1.)*yP*(yP+1.)/24.0 + ((2.-yP)*(yP-1.)*yP*(yP+2.)/6.0) &
+    !        & - (cM + c0)
+    cP = 1.0 - (cM+c0+cP2+cP3+cP4)
+
+    !e0 =((yP2+1)-2.)*((yP2+1)-1.)*(yP2+1)*((yP2+1)+1.)/24.0
+    !e0 =(yP2-1.)*yP2*(yP2+1)*(yP2+2.)/24.0
+    e0 =yP2*(-2.+yP2*(-1.+yP2*(2.+yP2)))/24.0
+    !eP2=(2.-yP2)*(yP2-1.)*yP2*(yP2+2.)/6.0
+    eP2=yP2*(-4.+yP2*(4.+yP2*(1.-yP2)))/6.0
+    !eP3=(yP2-2.)*(yP2-1.)*(yP2+1.)*(yP2+2.)/4.0
+    eP3=(4.+(yP2**2)*(-5.+yP2**2))/4.0
+    !eP4=(2.-yP2)*yP2*(yP2+1.)*(yP2+2.)/6.0
+    eP4=yP2*(4+yP2*(4-yP2*(1.+yP2)))/6.0
+    !eP5=(yP2-1.)*yP2*(yP2+1.)*(yP2+2.)/24.0
+    eP5=e0
+    !eP =((yP2-2.)*(yP2-1.)*yP2*(yP2+1.)/24.0) - e0
+    eP = 1.0 - (e0+eP2+eP3+eP4+eP5)
+
+    ! remeshing
+    buffer(j0-3) = buffer(j0-3)   +aM3*scaM 
+    buffer(j0-2) = buffer(j0-2)   +aM2*scaM +bM2*sca
+    buffer(j0-1) = buffer(j0-1)   + aM*scaM + bM*sca  + cM*scaP
+    buffer(j0)   = buffer(j0)     + a0*scaM + b0*sca  + c0*scaP + e0*scaP2
+    buffer(j0+1) = buffer(j0+1)   + aP*scaM + bP*sca  + cP*scaP + eP*scaP2
+    buffer(j0+2) = buffer(j0+2)   +aP2*scaM +bP2*sca  +cP2*scaP +eP2*scaP2
+    buffer(j0+3) = buffer(j0+3)             +bP3*sca  +cP3*scaP +eP3*scaP2
+    buffer(j0+4) = buffer(j0+4)                       +cP4*scaP +eP4*scaP2
+    buffer(j0+5) = buffer(j0+5)                                 +eP5*scaP2
+
+  end subroutine AC_remesh_O4_tag_LC
+
+
+  !> M'6 remeshing formula (the order is higher than 2; JM Ethancelin is working on
+  !! determining it).
+  !!      @param[in]       pos_adim= dimensionless particle position
+  !!      @param[in]       sca     = scalar advected by the particle
+  !!      @param[in,out]   buffer  = temporarily remeshed scalar field
+  subroutine AC_remesh_Mprime6(pos_adim, sca, buffer)
+
+    ! Input/Output
+    real(WP), intent(in)                                        :: pos_adim, sca
+    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: j0                       ! indice of the nearest mesh points
+    real(WP)    :: bM, bM2, b0, bP, bP2, bP3! interpolation weight for the particles
+    real(WP)    :: yM2, y0, yP, yP2     ! adimensionned distance to mesh points
+
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - float(j0))
+    !yM = y0+1
+    yP = y0+1
+    yM2 = y0-2
+    yP2 = y0+2
+
+    ! Interpolation weights
+    !bM2 =-(((y0+2.)-2)*(5.*(y0+2.)-8.)*((y0+2.)-3.)**3)/24.
+    bM2 = y0*(2. + y0*(-1. + y0*(-9. + (13. - 5.*y0)*y0)))/24.
+    !bM  =(y0+1.-1.)*(y0+1.-2.)*(25.*(y0+1.)**3-114.*(y0+1.)**2+153.*(y0+1.)-48.)/24.
+    bM = y0*(-16. + y0*(16. + y0*(39. + y0*(-64. + 25.*y0))))/24.
+    !bP  =-((1.-y0)-1.)*(25.*(1.-y0)**4-38.*(1.-y0)**3-3.*(1.-y0)**2+12.*(1.-y0)+12)/12.
+    bP = ( y0*(8. + y0*(8. + y0*(33. + y0*(-62. + 25.*y0)))))/12.
+    !bP2 = ((2.-y0)-1.)*((2.-y0)-2.)*(25.*(2.-y0)**3-114.*(2.-y0)**2+153.*(2.-y0)-48.)/24.
+    bP2 = (y0*(-2. + y0*(-1. + y0*(-33. + (61. - 25.*y0)*y0))))/24.
+    !bP3 =-(((3.-y0)-2)*(5.*(3.-y0)-8.)*((3.-y0)-3.)**3)/24.
+    bP3 = (y0**3)*(7. + y0*(5.*y0 - 12.))/24.
+    !b0  =-(y0-1.)*(25.*y0**4-38.*y0**3-3.*y0**2+12.*y0+12)/12.
+    !b0 = (12. + y0**2*(-15. + y0*(-35. + (63. - 25.*y0)*y0)))/12.
+    b0 = 1. - (bM2+bM+bP+bP2+bP3)
+
+    ! remeshing
+    buffer(j0-2) = buffer(j0-2)   + sca*bM2
+    buffer(j0-1) = buffer(j0-1)   + sca*bM
+    buffer(j0)   = buffer(j0)     + sca*b0
+    buffer(j0+1) = buffer(j0+1)   + sca*bP
+    buffer(j0+2) = buffer(j0+2)   + sca*bP2
+    buffer(j0+3) = buffer(j0+3)   + sca*bP3
+
+  end subroutine AC_remesh_Mprime6
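+
+  ! Quick check for the M'6 formula above: its support is the six points
+  ! j0-2 to j0+3; all weights except b0 vanish for y0 = 0 (particle on a mesh
+  ! point), and b0 = 1 - (bM2+bM+bP+bP2+bP3) guarantees that the weights sum to one.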
+
+end module advec_common
+!> @}
diff --git a/HySoP/src/scalesInterface/particles/advec_common_line.f90 b/HySoP/src/scalesInterface/particles/advec_common_line.f90
new file mode 100644
index 000000000..33bcafb2f
--- /dev/null
+++ b/HySoP/src/scalesInterface/particles/advec_common_line.f90
@@ -0,0 +1,534 @@
+!> @addtogroup part
+!! @{
+!------------------------------------------------------------------------------
+!
+! MODULE: advec_common_line
+!
+!
+! DESCRIPTION: 
+!> The module ``advec_common_line'' gathers functions and subroutines used to advect a
+!! scalar which are not specific to a direction. It contains some ``old''
+!! functions from ``advec_common'' which are not optimized.
+!! @details
+!! This module gathers functions and routines used to advect a scalar which are not
+!! specific to a direction. More precisely, it provides functions similar to
+!! ``advec_common'' but which work on a single line rather than on a
+!! group of lines. Considering how MPI parallelism works, working on a single
+!! line is not optimal. Therefore, these functions are only here for
+!! debugging and testing purposes. They can also be used to measure
+!! some speed-ups. They are simpler and more basic, but less efficient.
+!!
+!!      This module is automatically loaded when advec_common is used.
+!! Moreover, advec_common contains all the interfaces needed to automatically use
+!! the right function whenever you want to work on a single line or on a group of
+!! lines.
+!! Except for testing purposes, this module is not supposed to be used by the
+!! main code but only by the other advection modules. More precisely, a final user
+!! must only use the generic "advec" module, which contains all the interfaces to
+!! solve the advection equation with the particle method and to choose the
+!! remeshing formula, the dimensional splitting and everything else.
+!!
+!! The module "test_advec" can be used in order to validate the procedures
+!! embedded in this module.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advec_common_line
+  
+  use mpi
+  use precision
+  use cart_topology
+  use advec_variables
+
+  implicit none
+
+  ! ===== Public procedures =====
+
+  !----- To interpolate velocity -----
+  !! public                              :: AC_obtain_receivers_line
+  !----- Determine block type and tag particles -----
+  public                              :: AC_type_and_block_line
+  !----- To remesh particles -----
+  public                              :: AC_obtain_senders_line
+  public                              :: AC_bufferToScalar_line
+
+contains
+
+  ! ===== Public procedure =====
+
+
+  ! ==================================================================================
+  ! ====================     Compute particle velocity (RK2)      ====================
+  ! ==================================================================================
+
+  !> Determine the set of processes which will send me information during the velocity interpolation.
+  !!    @param[in]    direction       = current direction (1 = along X, 2 = along Y, 3 = along Z)
+  !!    @param[in]    ind_group       = coordinate of the current group of lines
+  !!    @param[in]    rece_ind_min    = minimal index of the mesh points involved in remeshing particles (of my local subdomain)
+  !!    @param[in]    rece_ind_max    = maximal index of the mesh points involved in remeshing particles (of my local subdomain)
+  !!    @param[out]   send_gap        = gap between my coordinate and the processes of minimal coordinate which will send information to me
+  !!    @param[out]   rece_gap        = gap between my coordinate and the processes of maximal coordinate which will receive information from me
+  !! @details
+  !!    Obtain the list of processes which need a part of my local velocity field
+  !!    to interpolate the velocity used in the RK2 scheme to advect their particles.
+  subroutine AC_obtain_receivers(direction, ind_group, rece_ind_min, rece_ind_max, send_gap, rece_gap)
+    ! XXX Works only for periodic conditions.
+
+    ! Input/Ouput
+    integer, intent(in)                 :: rece_ind_min, rece_ind_max
+    integer, intent(in)                 :: direction
+    integer, dimension(2), intent(in)   :: ind_group
+    integer, dimension(2), intent(out)  :: rece_gap, send_gap
+    integer, dimension(MPI_STATUS_SIZE) :: statut
+    ! Others 
+    integer                             :: proc_gap         ! gap between a processus coordinate (along the current 
+    ! direction) into the mpi-topology and my coordinate
+    integer                             :: rece_gapP        ! gap between the coordinate of the previous processus (in the current direction)
+    ! and the processes of maximal coordinate which will receive information from it
+    integer                             :: rece_gapN        ! same as above but for the next processus
+    integer                             :: rankP, rankN     ! processus rank for shift (P= previous, N = next)
+    integer                             :: tag_min, tag_max ! mpi message tag (for communicate rece_proc(1) and rece_proc(2))
+    integer                             :: send_request     ! mpi status of nonblocking send
+    integer                             :: send_request_bis ! mpi status of nonblocking send
+    integer                             :: ierr             ! mpi error code
+    integer, dimension(2)               :: tag_table        ! some mpi message tag
+    logical, dimension(:,:), allocatable:: test_request 
+    integer, dimension(:,:), allocatable:: s_request 
+
+    tag_min = 5
+    tag_max = 6
+
+    send_gap = 3*N(direction)
+
+    rece_gap(1) = floor(real(rece_ind_min-1)/N_proc(direction))
+    rece_gap(2) = floor(real(rece_ind_max-1)/N_proc(direction))
+
+    ! ===== Communicate with my neighbors -> obtain ghost =====
+    ! Compute their rank
+    call mpi_cart_shift(D_comm(direction), 0, 1, rankP, rankN, ierr)
+    ! Inform them about the processes from which I need information
+    tag_table = compute_tag(ind_group, tag_obtrec_ghost_NP, direction)
+    call mpi_Isend(rece_gap(1), 1, MPI_INTEGER, rankP, tag_table(1), D_comm(direction), send_request, ierr)
+    call mpi_Isend(rece_gap(2), 1, MPI_INTEGER, rankN, tag_table(2), D_comm(direction), send_request_bis, ierr)
+    ! Receive the same message from my neighbors
+    call mpi_recv(rece_gapN, 1, MPI_INTEGER, rankN, tag_table(1), D_comm(direction), statut, ierr)
+    call mpi_recv(rece_gapP, 1, MPI_INTEGER, rankP, tag_table(2), D_comm(direction), statut, ierr)
+
+
+    ! ===== Send information if I am first or last =====
+    allocate(s_request(rece_gap(1):rece_gap(2),2))
+    allocate(test_request(rece_gap(1):rece_gap(2),2))
+    test_request = .false.
+    tag_table = compute_tag(ind_group, tag_obtrec_NP, direction)
+    do proc_gap = rece_gap(1), rece_gap(2)
+       ! Compute the rank of the target processus
+       call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rankN, ierr)
+       ! Determine if I am the first or the last process (considering the current direction)
+       ! to require information from this processus
+       if (proc_gap>rece_gapP-1) then
+          if(rankN /= D_rank(direction)) then
+             call mpi_Isend(-proc_gap, 1, MPI_INTEGER, rankN, tag_table(1), D_comm(direction), s_request(proc_gap,1), ierr)
+             test_request(proc_gap,1) = .true.
+          else
+             send_gap(1) = -proc_gap
+          end if
+       end if
+       if (proc_gap<rece_gapN+1) then
+          if(rankN /= D_rank(direction)) then
+             test_request(proc_gap,2) = .true.
+             call mpi_Isend(-proc_gap, 1, MPI_INTEGER, rankN, tag_table(2), D_comm(direction), s_request(proc_gap,2), ierr)
+          else
+             send_gap(2) = -proc_gap
+          end if
+       end if
+    end do
+
+
+    ! ===== Receive information from the first and the last processus which need a part of my local velocity field =====
+    if (send_gap(1) == 3*N(direction)) then
+       call mpi_recv(send_gap(1), 1, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
+    end if
+    if (send_gap(2) == 3*N(direction)) then
+       call mpi_recv(send_gap(2), 1, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
+    end if
+
+
+    call MPI_WAIT(send_request,statut,ierr)
+    call MPI_WAIT(send_request_bis,statut,ierr)
+    do proc_gap = rece_gap(1), rece_gap(2)
+       if (test_request(proc_gap,1).eqv. .true.) call MPI_WAIT(s_request(proc_gap,1),statut,ierr)
+       if (test_request(proc_gap,2)) call MPI_WAIT(s_request(proc_gap,2),statut,ierr)
+    end do
+    deallocate(s_request)
+    deallocate(test_request)
+
+  end subroutine AC_obtain_receivers
+
+
+  ! ===================================================================================================
+  ! ====================     Others than velocity interpolation and remeshing      ====================
+  ! ===================================================================================================
+  !> Determine the type (center or left) of each block of a line and tag the particles of this line to know where
+  !! corrected remeshing formulas are required.
+  !!    @param[in]        dt          = time step
+  !!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+  !!    @param[in]        ind_group   = coordinate of the current group of lines
+  !!    @param[in]        p_V         = particle velocity (along the current direction)
+  !!    @param[out]       bl_type     = table of block types (center or left)
+  !!    @param[out]       bl_tag      = informs about tagged particles (bl_tag(ind_bl)=1 if the end of the bl_ind-th block
+  !!                                    and the beginning of the following one is tagged)
+  !! @details
+  !!        This subroutine deals with a single line. It determines the type of each
+  !!    block of this line and where corrected remeshing formulas are required. At
+  !!    those points, it tags the block transition (ie the end of the current block
+  !!    and the beginning of the following one) in order to indicate that corrected
+  !!    weights have to be used during the remeshing.
+  subroutine AC_type_and_block_line(dt, direction, ind_group, p_V, &
+       & bl_type, bl_tag)
+
+    ! In/Out variables
+    real(WP), intent(in)                                    :: dt           ! time step
+    integer, intent(in)                                     :: direction
+    integer, dimension(2), intent(in)                       :: ind_group
+    real(WP), dimension(:), intent(in)                      :: p_V
+    !    logical, dimension(bl_nb(direction))+1), intent(out)     :: bl_type      ! is the particle block a center block or a left one ?
+    logical, dimension(:), intent(inout) :: bl_type      ! is the particle block a center block or a left one ?
+    logical, dimension(:), intent(inout)       :: bl_tag       ! indice of tagged particles
+    ! Local variables
+    real(WP), dimension(bl_nb(direction)+1)                 :: bl_lambdaMin ! for a particle, lambda = V*dt/dx ;  bl_lambdaMin = min of
+    ! lambda on a block (also taking into account the first following particle)
+    real(WP)                                                :: lambP, lambN ! buffer to exchange some lambda min with other processus
+    integer, dimension(bl_nb(direction)+1)                  :: bl_ind       ! block index : integer as lambda in (bl_ind,bl_ind+1) for a left block
+    ! and lambda in (bl_ind-1/2, bl_ind+1/2) for a right block 
+    integer                                                 :: ind, i_p     ! some indices
+    real(WP)                                                :: cfl          ! = dt/d_sc(direction)
+    integer                                                 :: rankP, rankN ! processus rank for shift (P= previous, N = next)
+    integer, dimension(2)                                   :: send_request ! mpi status of nonblocking send
+    integer, dimension(2)                                   :: rece_request ! mpi status of nonblocking receive
+    integer, dimension(MPI_STATUS_SIZE)                     :: rece_status  ! mpi status (for mpi_wait)
+    integer, dimension(MPI_STATUS_SIZE)                     :: send_status  ! mpi status (for mpi_wait)
+    integer, dimension(2)                                   :: tag_table    ! other tags for mpi message
+    integer                                                 :: ierr         ! mpi error code
+
+    ! ===== Initialisation =====
+    cfl = dt/d_sc(direction)
+
+    ! ===== Compute bl_lambdaMin =====
+    ! -- Compute rank of my neighbor --
+    call mpi_cart_shift(D_comm(direction), 0, 1, rankP, rankN, ierr)
+
+    ! -- For the first block (1/2) --
+    ! The domain contains only its second half => exchange ghost with the previous processus
+    bl_lambdaMin(1) = minval(p_V(1:(bl_size/2)+1))*cfl
+    tag_table = compute_tag(ind_group, tag_part_tag_NP, direction)
+    ! Send message
+    call mpi_Isend(bl_lambdaMin(1), 1, MPI_DOUBLE_PRECISION, rankP, tag_table(1), D_comm(direction), send_request(1), ierr)
+    ! Receive it
+    call mpi_Irecv(lambN, 1, MPI_DOUBLE_PRECISION, rankN, tag_table(1), D_comm(direction), rece_request(1), ierr) 
+
+    ! -- For the last block (1/2) --
+    ! The processus contains only its first half => exchange ghost with the next processus
+    ind = bl_nb(direction) + 1
+    bl_lambdaMin(ind) = minval(p_V(N_proc(direction)-(bl_size/2)+1:N_proc(direction)))*cfl
+    ! Send message
+    call mpi_Isend(bl_lambdaMin(ind), 1, MPI_DOUBLE_PRECISION, rankN, tag_table(2), D_comm(direction), send_request(2), ierr)
+    ! Receive it
+    call mpi_Irecv(lambP, 1, MPI_DOUBLE_PRECISION, rankP, tag_table(2), D_comm(direction), rece_request(2), ierr) 
+
+    ! -- For the "middle" block --
+    do ind = 2, bl_nb(direction)
+       i_p = ((ind-1)*bl_size) + 1 - bl_size/2
+       bl_lambdaMin(ind) = minval(p_V(i_p:i_p+bl_size))*cfl
+    end do
+
+    ! -- For the first block (1/2) --
+    ! The domain contains only its second half => use exchanged ghost
+    ! Check reception
+    call mpi_wait(rece_request(2), rece_status, ierr)
+    bl_lambdaMin(1) = min(bl_lambdaMin(1), lambP)
+
+    ! -- For the last block (1/2) --
+    ! The processus contains only its first half => use exchanged ghost
+    ! Check reception
+    call mpi_wait(rece_request(1), rece_status, ierr)
+    ind = bl_nb(direction) + 1
+    bl_lambdaMin(ind) = min(bl_lambdaMin(ind), lambN)
+
+    ! ===== Compute block type and index =====
+    bl_ind = nint(bl_lambdaMin)
+    bl_type = (bl_lambdaMin<dble(bl_ind))
+
+
+    ! => center type if true, else left
+
+    ! ===== Tag particles =====
+    do ind = 1, bl_nb(direction)
+       bl_tag(ind) = ((bl_ind(ind)/=bl_ind(ind+1)) .and. (bl_type(ind).neqv.bl_type(ind+1)))
+    end do
+
+    call mpi_wait(send_request(1), send_status, ierr)
+    call mpi_wait(send_request(2), send_status, ierr)
+
+  end subroutine AC_type_and_block_line
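+
+  ! Example of the block typing above (illustrative values): bl_lambdaMin = 2.4
+  ! gives bl_ind = nint(2.4) = 2 and 2.4 < 2.0 is false, hence a left block;
+  ! bl_lambdaMin = 2.6 gives bl_ind = 3 and 2.6 < 3.0 is true, hence a centered
+  ! block. A transition is then tagged only when two consecutive blocks have
+  ! different indices and different types.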
+
+
+  ! ===================================================================
+  ! ====================     Remesh particles      ====================
+  ! ===================================================================
+
+  !> Determine the set of processes which will send me information during the
+  !!  scalar remeshing. Use implicit computation rather than communication (only
+  !!  possible if particles are gathered into blocks with constraints on velocity
+  !!  variation - as in the corrected lambda formulas) - works on a single line of particles.
+  !!    @param[in]    send_i_min  = minimal index of the send buffer
+  !!    @param[in]    send_i_max  = maximal index of the send buffer
+  !!    @param[in]    direction   = current direction (1 = along X, 2 = along Y, 3 = along Z)
+  !!    @param[in]    ind_group   = coordinate of the current group of lines
+  !!    @param[out]   proc_min    = gap between my coordinate and the processes of minimal coordinate which will receive information from me
+  !!    @param[out]   proc_max    = gap between my coordinate and the processes of maximal coordinate which will receive information from me
+  !!    @param[out]   rece_proc   = coordinate range of processes which will send me information during the remeshing.
+  !! @details
+  !!    Obtain the list of processes which contain some particles that belong to
+  !!    my subdomain after their advection (and thus which will be remeshed into
+  !!    my subdomain). This result is returned as an interval [send_min; send_max].
+  !!    All the processes whose coordinate (along the current direction) belongs to
+  !!    this segment are involved in the scalar remeshing of the current
+  !!    subdomain. This routine does not involve any communication to determine if
+  !!    a process is the first or the last one (considering its coordinate along
+  !!    the current direction) to send remeshing information to a given process.
+  !!    It directly computes it using the constraints on velocity (as in the corrected
+  !!    lambda scheme). When possible, use it rather than AC_obtain_senders_com.
+  subroutine AC_obtain_senders_line(send_i_min, send_i_max, direction, ind_group, proc_min, proc_max, rece_proc)
+    ! XXX Works only for periodic conditions. For Dirichlet conditions it is
+    ! possible that rece_proc(1), rece_proc(2), or both are never received
+    ! => detect it (track the first and the last particles) and deal with it.
+
+    ! Input/output
+    integer, intent(in)                 :: send_i_min
+    integer, intent(in)                 :: send_i_max
+    integer, intent(in)                 :: direction
+    integer, dimension(2), intent(in)   :: ind_group
+    integer(kind=4), intent(out)        :: proc_min, proc_max
+    integer, dimension(2), intent(out)  :: rece_proc
+    integer, dimension(MPI_STATUS_SIZE) :: statut
+    ! Other local variable
+    integer(kind=4)                     :: proc_gap         ! gap between a process coordinate (along the current
+    ! direction) in the MPI topology and my coordinate
+    integer                             :: rankP, rankN     ! process ranks for shift (P = previous, N = next)
+    integer, dimension(2)               :: tag_table        ! mpi message tags (used to communicate rece_proc(1) and rece_proc(2))
+    integer, dimension(:,:),allocatable :: send_request     ! mpi requests of the nonblocking sends (column 3 is a bookkeeping flag)
+    integer                             :: ierr             ! mpi error code
+
+    tag_table = compute_tag(ind_group, tag_obtsend_NP, direction)
+
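+    ! Initialise rece_proc to a sentinel value (3*N(direction) can never be a real
+    ! gap); it is overwritten below, either locally or by the matching mpi_recv.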
+    rece_proc = 3*N(direction)
+
+    proc_min = floor(real(send_i_min-1)/N_proc(direction))
+    proc_max = floor(real(send_i_max-1)/N_proc(direction))
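+
+    ! Worked example (hypothetical values): with N_proc(direction) = 16,
+    ! send_i_min = -3 and send_i_max = 20, proc_min = floor(-4./16.) = -1 and
+    ! proc_max = floor(19./16.) = 1, i.e. the send buffer is spread over the
+    ! previous neighbour, myself and the next neighbour.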
+
+    allocate(send_request(proc_min:proc_max,3))
+    send_request(:,3) = 0
+
+    ! Send
+    do proc_gap = proc_min, proc_max
+       ! Compute the rank of the target process
+       call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rankN, ierr)
+       ! Determine if I am the first or the last process (considering my
+       ! coordinate along the current direction) to send information to
+       ! one of these processes.
+       ! Note that local indices go from 1 to N_proc (Fortran convention).
+       ! I am the first ?
+       if ((send_i_min< +1-2*bl_bound_size + proc_gap*N_proc(direction)+1).AND. &
+            & (send_i_max>= proc_gap*N_proc(direction))) then
+          if(rankN /= D_rank(direction)) then
+             call mpi_Isend(-proc_gap, 1, MPI_INTEGER, rankN, tag_table(1), D_comm(direction), &
+                  & send_request(proc_gap,1), ierr)
+             send_request(proc_gap,3) = 1
+          else
+             rece_proc(1) = -proc_gap
+          end if
+       end if
+       ! I am the last ?
+       if ((send_i_max > -1+2*bl_bound_size + (proc_gap+1)*N_proc(direction)) &
+            & .AND.(send_i_min<= (proc_gap+1)*N_proc(direction))) then
+          if(rankN /= D_rank(direction)) then
+             call mpi_Isend(-proc_gap, 1, MPI_INTEGER, rankN, tag_table(2), D_comm(direction), &
+                  & send_request(proc_gap,2), ierr)
+             send_request(proc_gap,3) = send_request(proc_gap, 3) + 2
+          else
+             rece_proc(2) = -proc_gap
+          end if
+       end if
+    end do
+
+
+    ! Receive
+    if (rece_proc(1) == 3*N(direction)) then
+       call mpi_recv(rece_proc(1), 1, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
+    end if
+    if (rece_proc(2) == 3*N(direction)) then
+       call mpi_recv(rece_proc(2), 1, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
+    end if
+
+    ! Free Isend buffer
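+    ! send_request(proc_gap,3) encodes which non-blocking sends were posted:
+    ! 1 = "first" message only, 2 = "last" message only, 3 = both, 0 = none.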
+    do proc_gap = proc_min, proc_max
+       select case (send_request(proc_gap,3))
+       case (3)
+          call mpi_wait(send_request(proc_gap,1), statut, ierr)
+          call mpi_wait(send_request(proc_gap,2), statut, ierr)
+       case (2)
+          call mpi_wait(send_request(proc_gap,2), statut, ierr)
+       case (1)
+          call mpi_wait(send_request(proc_gap,1), statut, ierr)
+       end select
+    end do
+
+  end subroutine AC_obtain_senders_line
+
+
+  !> Common procedure for remeshing which performs all the communication and provides
+  !! the updated scalar field.
+  !!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+  !!    @param[in]        ind_group   = coordinate of the current group of lines
+  !!    @param[in]        send_i_min  = minimal index of the send buffer
+  !!    @param[in]        send_i_max  = maximal index of the send buffer
+  !!    @param[in]        proc_min    = gap between my coordinate and the process of minimal coordinate to which I send information
+  !!    @param[in]        proc_max    = gap between my coordinate and the process of maximal coordinate to which I send information
+  !!    @param[in]        rece_proc   = coordinate range of the processes which will send me information during the remeshing.
+  !!    @param[in]        send_buffer = buffer used to remesh the scalar before sending it to the right subdomain
+  !!    @param[in,out]    scal1D      = one-dimensional scalar field to advect
+  !! @details
+  !!    Remeshing is done in a local buffer. This subroutine distributes this buffer
+  !!    to the right processes, receives the buffers sent to me and updates the scalar field.
+  subroutine AC_bufferToScalar_line(direction, ind_group, send_i_min, send_i_max, proc_min, proc_max, rece_proc,send_buffer, scal1D)
+
+    ! Input/Ouptut
+    integer, intent(in)                                     :: direction
+    integer, dimension(2), intent(in)                       :: ind_group
+    integer, intent(in)                                     :: send_i_min
+    integer, intent(in)                                     :: send_i_max
+    integer, dimension(2), intent(in)                       :: rece_proc    ! minimal and maximal gap between my coordinate and the
+    ! ones from which I will receive data
+    integer, intent(in)                                     :: proc_min     ! smallest gap between me and the processes to which I send data
+    integer, intent(in)                                     :: proc_max     ! largest gap between me and the processes to which I send data
+    real(WP), dimension(send_i_min:send_i_max), intent(in)  :: send_buffer
+ !   real(WP), dimension(N_proc(direction)), intent(inout)   :: scal1D
+    real(WP), dimension(:), intent(inout)   :: scal1D
+
+    ! Variables used to communicate between subdomains. A variable prefixed by "send_" (resp. "rece_")
+    ! denotes something I send (resp. something I receive).
+    integer                             :: i            ! array index
+    integer                             :: proc_gap     ! gap between my coordinate and the one of the process with which I communicate
+    real(WP), dimension(:), allocatable :: rece_buffer  ! buffer used to store the received scalar field
+    integer                             :: send_gap     ! index offset (in mesh points) between me and another process
+    integer,dimension(:,:), allocatable :: rece_range   ! range of (local) indices where the received scalar field has to be saved
+    integer, dimension(2)               :: send_range   ! range of indices of the send buffer that goes to a given process
+    integer, dimension(:), allocatable  :: rece_request ! mpi communication requests (handles) of the nonblocking receives
+    integer, dimension(:), allocatable  :: rece_rank    ! rank of the process from which I receive data
+    integer                             :: send_rank    ! rank of the process to which I send data
+    integer                             :: rankP        ! rank used in mpi_cart_shift
+    integer, dimension(MPI_STATUS_SIZE) :: rece_status  ! mpi status (for mpi_wait)
+    integer, dimension(MPI_STATUS_SIZE) :: send_status  ! mpi status (for mpi_wait)
+    integer, dimension(:,:),allocatable :: send_request ! mpi requests of the nonblocking sends (column 3 is a bookkeeping flag)
+    integer                             :: rece_i_min   ! minimal index of the chunk of scalar field that I receive
+    integer                             :: rece_i_max   ! maximal index of the chunk of scalar field that I receive
+    integer                             :: ierr         ! mpi error code
+    integer                             :: comm_size    ! number of elements to send/receive
+    integer                             :: tag          ! mpi message tag
+
+    ! Send the information
+    allocate(send_request(proc_min:proc_max,3))
+    send_request(:,3)=0
+    do proc_gap = proc_min, proc_max
+       ! Compute the rank of the target process
+       call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank, ierr)
+       send_gap = proc_gap*N_proc(direction)
+       send_range(1) = max(send_i_min, send_gap+1) ! +1 because local indices start at 1 (Fortran)
+       send_range(2) = min(send_i_max, send_gap+N_proc(direction)) 
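+       ! Worked example (hypothetical values): with N_proc(direction) = 16,
+       ! send_i_min = -3, send_i_max = 20 and proc_gap = 1, send_gap = 16 and
+       ! send_range = (/ 17, 20 /), i.e. 4 values which land on local indices
+       ! 1 to 4 of the next process; for proc_gap = 0 the range 1:16 is simply
+       ! added to my own scal1D below.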
+       if (send_rank/=D_rank(direction)) then
+          ! Determine quantity of information to send
+          comm_size = send_range(2)-send_range(1)+1
+          ! Send the index range of the scalar field being sent
+          tag = compute_tag(ind_group, tag_bufToScal_range, direction, proc_gap)
+          call mpi_Isend(send_range(1), 2, MPI_INTEGER, send_rank, tag, D_comm(direction), send_request(proc_gap,1)&
+               & , ierr)
+          ! And send the buffer
+          tag = compute_tag(ind_group, tag_bufToScal_buffer, direction, proc_gap)
+          call mpi_Isend(send_buffer(send_range(1)),comm_size, MPI_DOUBLE_PRECISION, send_rank, &
+               & tag, D_comm(direction), send_request(proc_gap,2), ierr)
+          send_request(proc_gap,3) = 1
+       else
+          ! This part of the buffer stays local: distribute it into my own scalar field
+          do i = send_range(1), send_range(2)
+             scal1D(i-send_gap) = scal1D(i-send_gap) + send_buffer(i)
+          end do
+       end if
+    end do
+
+    ! Receive information
+    ! Allocate field
+    allocate(rece_rank(rece_proc(1):rece_proc(2)))
+    allocate(rece_range(2,rece_proc(1):rece_proc(2)))  ! careful: MPI needs contiguous memory elements (hence the range is the first dimension)
+    allocate(rece_request(rece_proc(1):rece_proc(2)))
+    ! Receive range
+    do proc_gap = rece_proc(1), rece_proc(2)
+       call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rece_rank(proc_gap), ierr)
+       if (rece_rank(proc_gap)/=D_rank(direction)) then
+          tag = compute_tag(ind_group, tag_bufToScal_range, direction, -proc_gap)
+          call mpi_Irecv(rece_range(1,proc_gap), 2, MPI_INTEGER, rece_rank(proc_gap), tag, D_comm(direction), &
+               & rece_request(proc_gap), ierr) ! we use tag = source rank
+       end if
+    end do
+    ! Check reception
+    do proc_gap = rece_proc(1), rece_proc(2)
+       if (rece_rank(proc_gap)/=D_rank(direction)) then
+          call mpi_wait(rece_request(proc_gap), rece_status, ierr)
+       end if
+    end do
+    deallocate(rece_request)
+    ! Receive buffer and remesh it
+    ! XXX Possible optimisation: an optimal code would
+    !   1 - post non-blocking receptions of the scalar buffers
+    !   2 - check when a reception is done and then update the scalar
+    !   3 - iterate step 2 until all messages have been received and the scalar
+    !       field has been updated with all the scalar buffers
+    do proc_gap = rece_proc(1), rece_proc(2)
+       if (rece_rank(proc_gap)/=D_rank(direction)) then
+          rece_i_min = rece_range(1,proc_gap)
+          rece_i_max = rece_range(2,proc_gap)
+          ! Receive information
+          comm_size=(rece_i_max-rece_i_min+1)
+          allocate(rece_buffer(rece_i_min:rece_i_max)) ! XXX possible optimisation:
+          ! allocate once to the maximum size; note that the range used in
+          ! this allocation is included in (1, N_proc(2))
+          tag = compute_tag(ind_group, tag_bufToScal_buffer, direction, -proc_gap)
+          call mpi_recv(rece_buffer(rece_i_min), comm_size, MPI_DOUBLE_PRECISION, &
+               & rece_rank(proc_gap), tag, D_comm(direction), rece_status, ierr)
+          ! Update the scalar field
+          send_gap = proc_gap*N_proc(direction)
+          scal1D(rece_i_min+send_gap:rece_i_max+send_gap) = scal1D(rece_i_min+send_gap:rece_i_max+send_gap) &
+               & + rece_buffer(rece_i_min:rece_i_max)
+          deallocate(rece_buffer)
+       end if
+    end do
+
+
+    ! Free Isend buffer
+    do proc_gap = proc_min, proc_max
+       if (send_request(proc_gap,3)==1) then
+          call mpi_wait(send_request(proc_gap,1), send_status, ierr)
+          call mpi_wait(send_request(proc_gap,2), send_status, ierr)
+       end if
+    end do
+    deallocate(send_request)
+
+    deallocate(rece_range)
+    deallocate(rece_rank)
+
+  end subroutine AC_bufferToScalar_line
+
+end module advec_common_line
diff --git a/HySoP/src/scalesInterface/particles/advec_remesh_formula.f90 b/HySoP/src/scalesInterface/particles/advec_remesh_formula.f90
new file mode 100644
index 000000000..0db3bb2a9
--- /dev/null
+++ b/HySoP/src/scalesInterface/particles/advec_remesh_formula.f90
@@ -0,0 +1,61 @@
+!> @addtogroup part
+!! @{
+!------------------------------------------------------------------------------
+!
+! MODULE: advec_remeshing_formula
+!
+!
+! DESCRIPTION: 
+!> This module gathers all the remeshing formulas. These interpolation
+!! polynomials are used to redistribute the particles on the mesh grid at each
+!! iteration.
+!! @details
+!! It provides the lambda 2 corrected, lambda 4 corrected and M'6 remeshing formulas.
+!! The "lambda corrected" remeshing formulas are designed for large time
+!! steps. The M'6 formula also appears to be stable for large time steps, but
+!! its numerical analysis remains to be done.
+!!     This module also provides some wrappers to remesh a complete line
+!! of particles (with the different formulas), either on an array or on an
+!! array of pointers to reals. In order to gather the communications between
+!! different lines of particles, it is better to use contiguous memory for the
+!! mesh points which belong to the same process, and thus to use an array of
+!! pointers to deal with this easily.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advec_remeshing_formula
+
+  use precision
+  use string
+  use advec_common
+
+  implicit none
+
+  ! ===== Header =====
+  !----- Order 2 remeshing formula -----
+  public                              :: AC_remesh_left      ! left remeshing formula
+  public                              :: AC_remesh_center    ! centered remeshing formula
+  public                              :: AC_remesh_tag_CL    ! corrected formula for tagged particles : transition from C to L block.
+  public                              :: AC_remesh_tag_LC    ! corrected formula for tagged particles : transition from L to C block
+  !----- Order 4 remeshing formula -----
+  public                              :: AC_remesh_O4_left   ! left remeshing formula
+  public                              :: AC_remesh_O4_center ! centered remeshing formula
+  public                              :: AC_remesh_O4_tag_CL ! corrected formula for tagged particles : transition from C to L block.
+  public                              :: AC_remesh_O4_tag_LC ! corrected formula for tagged particles : transition from L to C block
+  !----- M'6 remeshing formula -----
+  public                              :: AC_remesh_Mprime6   ! uses 6 grid points, 3 on each side of the particle.
+
+  ! XXX If we switch to Fortran 2003: move all these variables into the advec
+  ! module (file advec.F90) and declare them all as protected. Only the
+  ! "advec_init" procedure should be able to modify them, but many other
+  ! procedures must be able to access them.
+
+  !===== Interface =====
+  interface AC_remesh_lambda2corrected
+     module procedure AC_remesh_lambda2corrected_pter, AC_remesh_lambda2corrected_array
+  end interface AC_remesh_lambda2corrected
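+
+  ! Illustrative sketch, for reference only: the classical (uncorrected) lambda 2
+  ! formula distributes a particle of weight s, located at a fractional distance
+  ! y in [0,1[ from the grid point j0 on its left, onto three points:
+  !    w(j0-1) = s*0.5*y*(y-1.)
+  !    w(j0)   = s*(1.-y*y)
+  !    w(j0+1) = s*0.5*y*(y+1.)
+  ! (the three weights sum to s). The corrected formulas listed above modify
+  ! these weights for tagged particles at block transitions so that the scheme
+  ! remains valid for large time steps.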
+
+end module advec_remeshing_formula
diff --git a/HySoP/src/scalesInterface/particles/advec_variables.f90 b/HySoP/src/scalesInterface/particles/advec_variables.f90
new file mode 100644
index 000000000..471315c19
--- /dev/null
+++ b/HySoP/src/scalesInterface/particles/advec_variables.f90
@@ -0,0 +1,175 @@
+!> @addtogroup part
+!! @{
+!------------------------------------------------------------------------------
+!
+! MODULE: advec_variables
+!
+!
+! DESCRIPTION: 
+!> The module ``advec_variables'' gathers all the variables that have to be shared by the different
+!! advection modules. It also provides a set of methods to set the protected or private variables to the right values.
+!! @details
+!! It contains the variables common to the solvers along each direction and other generic variables used for the
+!! advection based on the particle method. It provides functions to set
+!! them to the right values depending on the chosen remeshing formula.
+!!
+!! This module is not supposed to be used by the main code but only by the other advection modules.
+!! More precisely, a final user must only use the generic "advec" module, which contains all the interfaces
+!! needed to initialize the solver (e.g. choosing the remeshing formula and the dimensional splitting) and to solve
+!! the advection equation with the particle method.
+!!
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advec_variables
+
+  use string
+  use precision
+  use cart_topology, only : N_proc,cart_rank
+
+  implicit none
+
+  ! --- In order to create an array of pointers ---
+  type real_pter
+     real(WP), pointer                           :: pter
+  end type real_pter
+  ! ---------------------------------------------
+
+  ! ===== Public and protected variables =====
+  ! ----- Minimal and maximal indices of the buffers used in the different communications -----
+  !> minimal index of the send buffer
+  integer, public                             :: send_j_min
+  !> maximal index of the send buffer
+  integer, public                             :: send_j_max
+  !> minimal index used in the remeshing of each line
+  integer,public,dimension(:,:),allocatable   :: send_group_min
+  !> maximal index used in the remeshing of each line
+  integer,public,dimension(:,:),allocatable   :: send_group_max
+  ! ----- Block information -----
+  !> chosen solver
+  character(len=str_short), protected         :: type_solv
+  !> number of particles in a block
+  integer, protected                          :: bl_size
+  !> distance between the "central" mesh point and the extreme mesh point of the stencil of points used to remesh a particle
+  integer, protected                          :: bl_bound_size
+  !> Number of common mesh points used in the remeshing of two successive particles
+  !! (in the case of a standard, i.e. non-corrected, remeshing formula).
+  integer, dimension(2), protected            :: bl_remesh_superposition
+  !> Number of blocks on each process along each direction
+  integer, dimension(3), protected            :: bl_nb
+
+  ! ------ To ensure unique mpi message tags -----
+  ! Tags generated with a proc_gap
+  !> To create tag used in AC_particle_velocity to send range
+  integer, dimension(2), parameter            :: tag_velo_range = (/ 0,1 /)
+  !> To create tag used in AC_particle_velocity to send velocity field
+  integer, dimension(2), parameter            :: tag_velo_V = (/ 0,2 /)
+  !> To create tag used in bufferToScalar to send the range of the buffer which will be sent
+  integer, dimension(2), parameter            :: tag_bufToScal_range = (/ 0,3 /)
+  !> To create tag used in bufferToScalar to send the buffer used to remesh particles
+  integer, dimension(2), parameter            :: tag_bufToScal_buffer = (/ 0,4 /)
+
+  ! Tags generated with "compute_gap_NP"
+  !> To create tag used in AC_obtain_receivers to send ghosts
+  integer, dimension(2), parameter            :: tag_obtrec_ghost_NP = (/ 0, 1/)
+  !> To create tag used in AC_type_and_block to exchange ghosts with neighbors
+  integer, dimension(2), parameter            :: tag_part_tag_NP = (/ 0, 2/)
+  !> To create tag used in AC_obtain_receivers to send messages about receivers of minimal and maximal rank
+  integer, dimension(2), parameter            :: tag_obtrec_NP = (/ 0, 3/)
+  !> To create tag used in AC_obtain_receivers to send message about senders of minimal and maximal rank
+  integer, dimension(2), parameter            :: tag_obtsend_NP = (/ 0, 4/)
+
+  ! ===== Public procedures =====
+  !----- Initialize solver -----
+  public                                      :: AC_solver_init
+  public                                      :: AC_set_part_bound_size
+
+contains
+
+  ! ====================================================================
+  ! ====================    Initialize context      ====================
+  ! ====================================================================
+
+  !> Initialize some variables related to the solver implementation (which
+  !! depend on the chosen remeshing formula and on the dimensional splitting used).
+  !!    @param[in]        part_solv   = chosen remeshing formula (space order, ...)
+  !!    @param[in]        verbosity   = to display info about the chosen remeshing formula (optional)
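+  !!
+  !!    Illustrative usage (hypothetical values):
+  !! @code
+  !!    call AC_solver_init('p_O2', verbosity=.false.)   ! corrected lambda 2, silent
+  !! @endcode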
+  subroutine AC_solver_init(part_solv, verbosity)
+
+    ! Input/Output
+    character(len=*), optional, intent(in)  ::  part_solv
+    logical, optional, intent(in)  ::  verbosity
+    ! Others
+    logical :: verbose
+
+    ! Set verbosity
+    verbose = .true.
+    if (present(verbosity)) verbose = verbosity
+
+    if (present(part_solv)) type_solv = part_solv
+
+    ! Initialisation part adapted to each method
+    select case(type_solv)
+    case('p_O2')
+       bl_size = 2
+       bl_bound_size = 1
+       if ((cart_rank==0).and.(verbose)) then
+          write(*,'(6x,a)') '========== Advection scheme ========='
+          write(*,'(6x,a)') ' particle method, corrected lambda 2 '
+          write(*,'(6x,a)') '====================================='
+       end if
+    case('p_O4')
+       bl_size = 4
+       bl_bound_size = 2
+       if ((cart_rank==0).and.(verbose)) then
+          write(*,'(6x,a)') '========== Advection scheme ========='
+          write(*,'(6x,a)') ' particle method, corrected lambda 4 '
+          write(*,'(6x,a)') '====================================='
+       end if
+    case('p_M6')
+       bl_size = 1
+       bl_bound_size = 3   ! Be aware: don't use it to compute the superposition between
+       ! mpi processes (not as predictable as with the corrected schemes)
+       if ((cart_rank==0).and.(verbose)) then
+          write(*,'(6x,a)') '========== Advection scheme ========='
+          write(*,'(6x,a)') ' particle method, corrected M prime 6'
+          write(*,'(6x,a)') '====================================='
+       end if
+    case default
+       bl_size = 2
+       bl_bound_size = 1
+       if ((cart_rank==0).and.(verbose)) then
+          write(*,'(6x,a)') '========== Advection scheme ========='
+          write(*,'(6x,a)') ' particle method, corrected lambda 2 '
+          write(*,'(6x,a)') '====================================='
+       end if
+    end select
+
+    ! Check that the subdomain contains a number of mesh points divisible by bl_size
+    if ((modulo(N_proc(1),bl_size)/=0).OR.(modulo(N_proc(2),bl_size)/=0).OR.(modulo(N_proc(3),bl_size)/=0)) then
+       if (cart_rank ==0) print*, 'Number of mesh points per process must be a multiple of ', bl_size
+       stop
+    end if
+
+    ! Compute the local number of blocks along each direction
+    bl_nb = N_proc/bl_size
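+    ! e.g. (hypothetical values) with N_proc = (/ 64, 32, 32 /) and bl_size = 2,
+    ! bl_nb = (/ 32, 16, 16 /)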
+
+  end subroutine AC_solver_init
+
+  !> Manually change the protected variable "bl_bound_size" - for testing purposes only
+  !! (auto-validation tests)
+  !!    @param[in]        bound_size   = wanted value of "bl_bound_size"
+  subroutine AC_set_part_bound_size(bound_size)
+
+    ! Input/Ouput
+    integer, intent(in) :: bound_size
+
+    bl_bound_size = bound_size
+
+  end subroutine AC_set_part_bound_size
+
+
+end module advec_variables
diff --git a/HySoP/src/scalesInterface/precision.f90 b/HySoP/src/scalesInterface/precision.f90
new file mode 100644
index 000000000..3cb6a31bf
--- /dev/null
+++ b/HySoP/src/scalesInterface/precision.f90
@@ -0,0 +1,28 @@
+!------------------------------------------------------------------------------
+!
+! MODULE: precision
+!
+!> @author
+!> Guillaume Balarac, LEGI
+!
+! DESCRIPTION: 
+!> The aim of this module is to set some parameters that fix the working data
+!> representation in the code. It is set to double precision for REAL.
+!------------------------------------------------------------------------------
+
+MODULE precision
+
+  IMPLICIT NONE
+  INTEGER, PARAMETER  :: SP = kind(1.0)
+  INTEGER, PARAMETER  :: DP = kind(1.0d0)
+  INTEGER, PARAMETER  :: WP = DP
+  REAL(WP), PRIVATE   :: sample_real_at_WP
+  REAL(WP), PARAMETER :: MAX_REAL_WP = HUGE(sample_real_at_WP)
+  INTEGER, PRIVATE    :: sample_int
+  INTEGER, PARAMETER  :: MAX_INTEGER = HUGE(sample_int)
+  !> the MPI type for REAL exchanges in single or double precision
+  INTEGER, PUBLIC :: MPI_REAL_WP
+  !> the MPI type for COMPLEX exchanges in single or double precision
+  INTEGER, PUBLIC :: MPI_COMPLEX_WP
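+
+  !> Illustrative usage (assumption: MPI_REAL_WP is initialised elsewhere after MPI
+  !> startup, e.g. to MPI_DOUBLE_PRECISION when WP = DP; comm and ierr are hypothetical):
+  !>    REAL(WP) :: dt_local, dt_global
+  !>    call MPI_ALLREDUCE(dt_local, dt_global, 1, MPI_REAL_WP, MPI_MIN, comm, ierr)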
+
+END MODULE precision
diff --git a/HySoP/src/scalesInterface/string.f90 b/HySoP/src/scalesInterface/string.f90
new file mode 100644
index 000000000..c54aadb33
--- /dev/null
+++ b/HySoP/src/scalesInterface/string.f90
@@ -0,0 +1,18 @@
+!------------------------------------------------------------------------------
+!
+! MODULE: string
+!
+!> @author
+!> Guillaume Balarac, LEGI
+!
+! DESCRIPTION: 
+!> The aim of this module is to set some parameters that fix the sizes of
+!> string data in the code
+!------------------------------------------------------------------------------
+module string
+  implicit none
+  integer, parameter :: str_short  = 8
+  integer, parameter :: str_medium = 64
+  integer, parameter :: str_long   = 4096
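+  ! Illustrative usage: character(len=str_short) :: type_solv  (as in advec_variables)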
+end module string
+ 
-- 
GitLab