From 0412a7c7700fa08902bea30ecf70b774ac091ac4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Franck=20P=C3=A9rignon?= <franck.perignon@imag.fr>
Date: Wed, 13 Jun 2012 13:48:25 +0000
Subject: [PATCH] parmepy update

---
 HySoP/hysop/__init__.py                       |    7 +-
 .../Domain/Box.py => HySoP/hysop/box.py       |    0
 HySoP/hysop/constants.py                      |   27 +
 .../Domain => HySoP/hysop/domain}/__init__.py |    0
 .../hysop/domain/discrete.py                  |    0
 .../hysop/domain/domain.py                    |    0
 .../hysop/domain/grid.py                      |    0
 HySoP/hysop/scales2py/scales2py.f90           |   84 +
 HySoP/setup.py.in                             |   15 +-
 HySoP/src/CMakeLists.txt                      |   17 +-
 .../src/scalesInterface/particles/advecX.f90  |   10 +-
 .../src/scalesInterface/particles/advecY.f90  |    9 +-
 .../src/scalesInterface/particles/advecZ.f90  |    9 +-
 .../particles/advec_common.f90                | 2048 -----------------
 ...ommon_group.F90 => advec_common_group.f90} |   86 +-
 15 files changed, 183 insertions(+), 2129 deletions(-)
 rename parmepy/new_ParMePy/Domain/Box.py => HySoP/hysop/box.py (100%)
 create mode 100644 HySoP/hysop/constants.py
 rename {parmepy/new_ParMePy/Domain => HySoP/hysop/domain}/__init__.py (100%)
 rename parmepy/new_ParMePy/Domain/DiscreteDomain.py => HySoP/hysop/domain/discrete.py (100%)
 rename parmepy/new_ParMePy/Domain/ContinuousDomain.py => HySoP/hysop/domain/domain.py (100%)
 rename parmepy/new_ParMePy/Domain/CartesianGrid.py => HySoP/hysop/domain/grid.py (100%)
 create mode 100755 HySoP/hysop/scales2py/scales2py.f90
 delete mode 100644 HySoP/src/scalesInterface/particles/advec_common.f90
 rename HySoP/src/scalesInterface/particles/{advec_common_group.F90 => advec_common_group.f90} (97%)

diff --git a/HySoP/hysop/__init__.py b/HySoP/hysop/__init__.py
index 2769db984..1e4759b42 100644
--- a/HySoP/hysop/__init__.py
+++ b/HySoP/hysop/__init__.py
@@ -7,13 +7,16 @@ Python package dedicated to flow simulation using particular methods on hybrid a
 __version__=1.00
 __all__=['Box','CartesianTopology','ScalarField']
 
-#import constants
+import constants
 
 import domain.box
-
 ## Box-type physical domain 
 Box = domain.box.Box
 
+## Discrete scalar fields
+import fields.discrete
+ScalarField = fields.discrete.ScalarField
+
 ## MPI topologies and associated meshes
 import domain.topology
 CartesianTopology = domain.topology.CartesianTopology
diff --git a/parmepy/new_ParMePy/Domain/Box.py b/HySoP/hysop/box.py
similarity index 100%
rename from parmepy/new_ParMePy/Domain/Box.py
rename to HySoP/hysop/box.py
diff --git a/HySoP/hysop/constants.py b/HySoP/hysop/constants.py
new file mode 100644
index 000000000..a8dcc85bd
--- /dev/null
+++ b/HySoP/hysop/constants.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+"""
+@package parmepy.constants
+Constants required for the parmepy package (internal use).
+
+"""
+
+import numpy as np
+import math
+
+# 
+PI = math.pi
+# Set default type for real and integer numbers 
+PARMES_REAL = np.float64
+PARMES_INTEGER = np.uint32
+PARMES_REAL_GPU = np.float32
+## default array layout (fortran or C convention)
+ORDER = 'F'
+DEBUG = 0
+## label for x direction
+XDIR = 0
+## label for y direction
+YDIR = 1
+## label for z direction
+ZDIR = 2
+## Tag for periodic boundary conditions
+PERIODIC = 0
diff --git a/parmepy/new_ParMePy/Domain/__init__.py b/HySoP/hysop/domain/__init__.py
similarity index 100%
rename from parmepy/new_ParMePy/Domain/__init__.py
rename to HySoP/hysop/domain/__init__.py
diff --git a/parmepy/new_ParMePy/Domain/DiscreteDomain.py b/HySoP/hysop/domain/discrete.py
similarity index 100%
rename from parmepy/new_ParMePy/Domain/DiscreteDomain.py
rename to HySoP/hysop/domain/discrete.py
diff --git a/parmepy/new_ParMePy/Domain/ContinuousDomain.py b/HySoP/hysop/domain/domain.py
similarity index 100%
rename from parmepy/new_ParMePy/Domain/ContinuousDomain.py
rename to HySoP/hysop/domain/domain.py
diff --git a/parmepy/new_ParMePy/Domain/CartesianGrid.py b/HySoP/hysop/domain/grid.py
similarity index 100%
rename from parmepy/new_ParMePy/Domain/CartesianGrid.py
rename to HySoP/hysop/domain/grid.py
diff --git a/HySoP/hysop/scales2py/scales2py.f90 b/HySoP/hysop/scales2py/scales2py.f90
new file mode 100755
index 000000000..aa540b12a
--- /dev/null
+++ b/HySoP/hysop/scales2py/scales2py.f90
@@ -0,0 +1,84 @@
+!> Specific parameters for parmepy interface
+!! to deal with the problem explained here:
+!! http://projects.scipy.org/numpy/ticket/1835
+module parmepyparameters
+
+implicit none
+
+integer, parameter :: mk = 8
+integer, parameter :: dim3 = 3
+
+end module parmepyparameters
+
+!> Interface to scale code from Legi
+module scales2py
+
+use cart_topology, only : cart_create,set_group_size,discretisation_create,N_proc,coord
+use advec, only : advec_init,advec_step
+!!use precision, only : wp
+use parmepyparameters
+
+implicit none
+
+!! problem dimension
+!!integer, parameter :: dim3 = 3
+
+contains
+  
+  subroutine cartCreate(dims,groupSize)
+
+    !integer,intent(in) :: topoDim
+
+    integer,dimension(:),intent(in) :: dims
+    integer, intent(in) :: groupSize
+    integer :: ierr
+        
+    print *, "this is a call to scale ..."
+    
+    call cart_create(dims,ierr)
+    call set_group_size(groupSize)
+
+  end subroutine cartCreate
+
+  subroutine discretisationCreate(resolution,domainLengths)
+    
+    !! global resolution
+    integer, dimension(dim3),intent(in) :: resolution
+    !! global domain dimensions
+    real(mk), dimension(dim3),intent(in) :: domainLengths
+    
+    call discretisation_create(resolution(1),resolution(2),resolution(3),domainLengths(1),domainLengths(2),domainLengths(3))
+
+  end subroutine discretisationCreate
+
+  subroutine advecInit(order,stab_coeff)
+    character(len=*), optional, intent(in)  ::  order
+    real(mk), optional, intent(inout)         ::  stab_coeff
+    
+    call advec_init(order,stab_coeff)
+   
+  end subroutine advecInit
+
+  subroutine getLocalResolution(resolution)
+    integer,dimension(dim3),intent(out) :: resolution
+
+    resolution = N_proc
+  end subroutine getLocalResolution
+  
+  subroutine getTopoCoords(topocoords)
+    integer,dimension(dim3),intent(out) :: topocoords
+
+    topocoords = coord
+  end subroutine getTopoCoords
+
+  subroutine advecStep(timeStep,Vx,Vy,Vz,scal,dim_split)
+    real(mk), intent(in)                        :: timeStep
+    real(mk), dimension(:,:,:), intent(in)      :: Vx, Vy, Vz
+    real(mk), dimension(:,:,:), intent(inout)   :: scal
+    character(len=*), optional, intent(in)      :: dim_split
+
+    call advec_step(timeStep,Vx,Vy,Vz,scal,dim_split)
+
+  end subroutine advecStep
+
+end module scales2py
diff --git a/HySoP/setup.py.in b/HySoP/setup.py.in
index a0cac284a..126ff240a 100644
--- a/HySoP/setup.py.in
+++ b/HySoP/setup.py.in
@@ -6,14 +6,14 @@ setup.py file for @PYPACKAGE_NAME@
 """
 from numpy.distutils.core import setup, Extension
 import os, glob
-
+import sys
 # List of all directories for sources
 #os.listdir(os.path.join('ParMePy','New_ParmePy'))
 
 # Full package name
 name = '@PYPACKAGE_NAME@'
 # List of modules (directories) to be included
-packages = ['ParMePy','ParMePy.domain','ParMePy.fields']
+packages = ['parmepy','parmepy.domain','parmepy.fields']
 #            'examples']
 #            'new_ParMePy',
 #            'new_ParMePy/Domain',
@@ -25,10 +25,13 @@ packages = ['ParMePy','ParMePy.domain','ParMePy.fields']
 
 # Enable this to get debug info
 # DISTUTILS_DEBUG=1
-fortran_src = glob.glob('@CMAKE_SOURCE_DIR@/ParMePy/parmesfftw2py/*.f90')
+fortran_src = glob.glob('@CMAKE_SOURCE_DIR@/parmepy/parmesfftw2py/*.f90')
 parmes_dir=['@CMAKE_BINARY_DIR@/Modules']
 parmes_libdir=['@CMAKE_BINARY_DIR@/src']
 parmeslib=['@PARMES_LIBRARY_NAME@']
+#f2py_options=['-DF2PY_REPORT_ON_ARRAY_COPY']
+f2py_options=['--no-lower']
+sys.argv.extend('config_fc -DF2PY_REPORT_ON_ARRAY_COPY'.split())
 #,f2py_options=['-c --f90exec=/Users/Franck/Softs/install-gnu/openmpi-1.5.3/bin/mpif90'])
 #f2py_options=['skip: discretisation_init :']
 # Example with include and link :
@@ -40,7 +43,9 @@ parmeslib=['@PARMES_LIBRARY_NAME@']
 
 #parpyModule=Extension(name='ParMePy.scales2py',sources=fortran_src,include_dirs=[legi_dir],library_dirs=legi_lib,libraries=['parmeslegi'])
 #parpyModule=Extension(name='parpy',sources=fortran_src)#,f2py_options=f2py_options)
-parpyModule=Extension(name='ParMePy.fftw2py',sources=fortran_src,include_dirs=parmes_dir,library_dirs=parmes_libdir,libraries=parmeslib)
+parpyModule=Extension(name='parmepy.fftw2py',f2py_options=f2py_options,sources=fortran_src,include_dirs=parmes_dir,library_dirs=parmes_libdir,libraries=parmeslib)
+scales_src = glob.glob('@CMAKE_SOURCE_DIR@/parmepy/scales2py/*.f90')
+scalesModule=Extension(name='parmepy.scales2py',f2py_options=f2py_options,sources=scales_src,include_dirs=parmes_dir,library_dirs=parmes_libdir,libraries=parmeslib)
 
 #ppm_lib_dir=[os.path.join(ppm_dir,'lib')]
 #ppm_lib=['ppm_core']
@@ -54,7 +59,7 @@ setup(name=name,
       url='https://forge.imag.fr/projects/parmes/',
       license='GNU public license',
       package_dir = {'': '@CMAKE_SOURCE_DIR@'},
-      ext_modules=[parpyModule],
+      ext_modules=[parpyModule,scalesModule],
       packages=packages
       #data_files=[('new_ParMePy/Utils', ['./new_ParMePy/Utils/gpu_src.cl'])]
 )
diff --git a/HySoP/src/CMakeLists.txt b/HySoP/src/CMakeLists.txt
index 099373346..5e156859a 100644
--- a/HySoP/src/CMakeLists.txt
+++ b/HySoP/src/CMakeLists.txt
@@ -1,7 +1,7 @@
 #=======================================================
 # cmake utility to compile,link and install : 
 #  - parmes library (libparmes...)
-#  - parmesscales library particular solver from scales, (libparmesscales...)
+#  - library particular solver from scales
 #  
 #=======================================================
 
@@ -19,6 +19,16 @@ set(${PARMES_LIBRARY_NAME}_SRCDIRS
   #src/interfaces/Fortran2Cpp
   #  src/interfaces/ppm
   )
+# --- scales ---
+if(WITH_SCALES)
+  set(${PARMES_LIBRARY_NAME}_SRCDIRS 
+    ${${PARMES_LIBRARY_NAME}_SRCDIRS}
+    scalesInterface/
+    scalesInterface/layout
+    scalesInterface/particles
+    scalesInterface/output
+    )
+endif()
 
 # A main file to create an executable (test purpose)
 # Any files in these dirs will be used to create Parmes exec (linked with libparmes)
@@ -76,11 +86,6 @@ if(USE_MPI)
   set(LIBS ${LIBS} ${MPI_Fortran_LIBRARIES} )
 endif(USE_MPI)
 
-# --- scales ---
-if(WITH_SCALES)
-  add_subdirectory(scalesInterface)
-endif()
-
 # --- PPM ---
 if(WITH_PPM)
   add_subdirectory(ppmInterface)
diff --git a/HySoP/src/scalesInterface/particles/advecX.f90 b/HySoP/src/scalesInterface/particles/advecX.f90
index 977a561fb..cb2029b28 100644
--- a/HySoP/src/scalesInterface/particles/advecX.f90
+++ b/HySoP/src/scalesInterface/particles/advecX.f90
@@ -30,6 +30,7 @@
 module advecX
 
     use precision
+    use advec_common
 
     implicit none
 
@@ -129,7 +130,6 @@ end subroutine advecX_calc
 !!    @param[in,out]    scal3D   = scalar field to advect
 subroutine advecX_calc_O2(dt,Vx,scal3D)
 
-    use advec_common    ! Some procedures common to advection along all directions
     use advec_variables ! contains info about solver parameters and others.
     use cart_topology   ! Description of mesh and of mpi topology
 
@@ -182,7 +182,6 @@ end subroutine advecX_calc_O2
 !!    @param[in]        gs      = size of groups (along X direction)
 subroutine advecX_calc_O2_group(dt,Vx,scal3D,gs)
 
-    use advec_common    ! Some procedures common to advection along all directions
     use advec_variables ! contains info about solver parameters and others.
     use cart_topology   ! Description of mesh and of mpi topology
 
@@ -237,7 +236,6 @@ end subroutine advecX_calc_O2_group
 !!    @param[in]        gs      = size of groups (along X direction)
 subroutine advecX_calc_O4(dt,Vx,scal3D,gs)
 
-    use advec_common    ! Some procedures common to advection along all directions
     use advec_variables ! contains info about solver parameters and others.
     use cart_topology   ! Description of mesh and of mpi topology
 
@@ -291,7 +289,6 @@ end subroutine advecX_calc_O4
 !!    @param[in]        gs      = size of groups (along X direction)
 subroutine advecX_calc_Mprime6(dt,Vx,scal3D,gs)
 
-    use advec_common    ! Some porcedure common to advection along all directions
     use cart_topology   ! Description of mesh and of mpi topology
 
     ! Input/Output
@@ -346,7 +343,6 @@ end subroutine advecX_calc_Mprime6
 !!    @param[in,out]    scal        = scalar field to advect
 subroutine Xremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,j,k,scal)
 
-    use advec_common            ! Some procedures common to advection along all directions
     use advec_remeshing_formula ! Remeshing formula
     use advec_variables         ! contains info about solver parameters and others.
     use cart_topology           ! Description of mesh and of mpi topology
@@ -414,7 +410,6 @@ end subroutine Xremesh_O2
 !!    @param[in,out]    scal        = scalar field to advect
 subroutine Xremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag,j,k,scal)
 
-    use advec_common            ! Some procedures common to advection along all directions
     use advec_remeshing_formula ! Remeshing formula
     use advec_variables         ! contains info about solver parameters and others.
     use cart_topology           ! Description of mesh and of mpi topology
@@ -494,7 +489,6 @@ end subroutine Xremesh_O2_group
 !!    @param[in,out]    scal        = scalar field to advect
 subroutine Xremesh_O2_group_bis(ind_group, gs, p_pos_adim, bl_type, bl_tag,j,k,scal)
 
-    use advec_common            ! Some procedures common to advection along all directions
     use advec_remeshing_formula ! Remeshing formula
     use advec_variables         ! contains info about solver parameters and others.
     use advec_abstract_proc     ! profile of generic procedure
@@ -733,7 +727,6 @@ end subroutine Xremesh_O2_group_bis
 !!    @param[in,out]    scal          = scalar field to advect
 subroutine Xremesh_O4(ind_group, gs, p_pos_adim, bl_type, bl_tag,j,k,scal)
 
-    use advec_common            ! Some procedures common to advection along all directions
     use advec_remeshing_formula ! Remeshing formula
     use advec_variables         ! contains info about solver parameters and others.
     use cart_topology           ! Description of mesh and of mpi topology
@@ -812,7 +805,6 @@ end subroutine Xremesh_O4
 !!    @param[in,out]    scal        = scalar field to advect
 subroutine Xremesh_Mprime6(ind_group, gs, p_pos_adim, j,k,scal)
 
-    use advec_common            ! Some procedures common to advection along all directions
     use advec_remeshing_formula ! Remeshing formula
     use advec_variables         ! contains info about solver parameters and others.
     use cart_topology           ! Description of mesh and of mpi topology
diff --git a/HySoP/src/scalesInterface/particles/advecY.f90 b/HySoP/src/scalesInterface/particles/advecY.f90
index 39e7fd312..35865dfd7 100644
--- a/HySoP/src/scalesInterface/particles/advecY.f90
+++ b/HySoP/src/scalesInterface/particles/advecY.f90
@@ -31,6 +31,7 @@
 module advecY
 
     use precision
+    use advec_common
 
     implicit none
 
@@ -130,7 +131,6 @@ end subroutine advecY_calc
 !!    @param[in,out]    scal3D   = scalar field to advect
 subroutine advecY_calc_line(dt,Vy,scal3D)
 
-    use advec_common    ! some procedures common to advection along all directions
     use advec_variables ! contains info about solver parameters and others.
     use cart_topology   ! description of mesh and of mpi topology
 
@@ -181,7 +181,6 @@ end subroutine advecY_calc_line
 !!    @param[in]        gs      = size of group along current direction (ie along Y-axis)
 subroutine advecY_calc_group(dt,Vy,scal3D,gs)
 
-    use advec_common    ! some procedures common to advection along all directions
     use advec_variables ! contains info about solver parameters and others.
     use cart_topology   ! description of mesh and of mpi topology
 
@@ -234,7 +233,6 @@ end subroutine advecY_calc_group
 !!    @param[in]        gs      = size of group along current direction (ie along Y-axis)
 subroutine advecY_calc_O4(dt,Vy,scal3D,gs)
 
-    use advec_common    ! some procedures common to advection along all directions
     use advec_variables ! contains info about solver parameters and others.
     use cart_topology   ! description of mesh and of mpi topology
 
@@ -287,7 +285,6 @@ end subroutine advecY_calc_O4
 !!    @param[in]        gs      = size of group along current direction (ie along Y-axis)
 subroutine advecY_calc_Mprime6(dt,Vy,scal3D,gs)
 
-    use advec_common    ! some procedures common to advection along all directions
     use advec_variables ! contains info about solver parameters and others.
     use cart_topology   ! description of mesh and of mpi topology
 
@@ -342,7 +339,6 @@ end subroutine advecY_calc_Mprime6
 !!    @param[in,out]    scal        = scalar field to advect
 subroutine Yremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,i,k,scal)
 
-    use advec_common            ! Some procedures common to advection along all directions
     use advec_remeshing_formula ! Remeshing formula
     use advec_variables         ! contains info about solver parameters and others.
     use cart_topology           ! Description of mesh and of mpi topology
@@ -410,7 +406,6 @@ end subroutine Yremesh_O2
 !!    @param[in,out]    scal        = scalar field to advect
 subroutine Yremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag,i,k,scal)
 
-    use advec_common            ! Some procedures common to advection along all directions
     use advec_remeshing_formula ! Remeshing formula
     use advec_variables         ! contains info about solver parameters and others.
     use cart_topology           ! Description of mesh and of mpi topology
@@ -489,7 +484,6 @@ end subroutine Yremesh_O2_group
 !!    @param[in,out]    scal        = scalar field to advect
 subroutine Yremesh_O4(ind_group, gs, p_pos_adim, bl_type, bl_tag,i,k,scal)
 
-    use advec_common            ! Some procedures common to advection along all directions
     use advec_remeshing_formula ! Remeshing formula
     use advec_variables         ! contains info about solver parameters and others.
     use cart_topology           ! Description of mesh and of mpi topology
@@ -567,7 +561,6 @@ end subroutine Yremesh_O4
 !!    @param[in,out]    scal        = scalar field to advect
 subroutine Yremesh_Mprime6(ind_group, gs, p_pos_adim, i,k,scal)
 
-    use advec_common            ! Some procedures common to advection along all directions
     use advec_remeshing_formula ! Remeshing formula
     use advec_variables         ! contains info about solver parameters and others.
     use cart_topology           ! Description of mesh and of mpi topology
diff --git a/HySoP/src/scalesInterface/particles/advecZ.f90 b/HySoP/src/scalesInterface/particles/advecZ.f90
index daad4b17c..77b398fc6 100644
--- a/HySoP/src/scalesInterface/particles/advecZ.f90
+++ b/HySoP/src/scalesInterface/particles/advecZ.f90
@@ -31,6 +31,7 @@
 module advecZ
 
     use precision
+    use advec_common
 
     implicit none
 
@@ -138,7 +139,6 @@ end subroutine advecZ_calc
 !!    @param[in,out]    scal3D   = scalar field to advect
 subroutine advecZ_calc_O2(dt,Vz,scal3D)
 
-    use advec_common    ! some procedures common to advection along all directions
     use advec_variables ! contains info about solver parameters and others.
     use cart_topology   ! Description of mesh and of mpi topology
 
@@ -189,7 +189,6 @@ end subroutine advecZ_calc_O2
 !!    @param[in]        gs      = size of groups (along Z direction)
 subroutine advecZ_calc_O2_group(dt,Vz,scal3D,gs)
 
-    use advec_common    ! some procedures common to advection along all directions
     use advec_variables ! contains info about solver parameters and others.
     use cart_topology   ! Description of mesh and of mpi topology
 
@@ -243,7 +242,6 @@ end subroutine advecZ_calc_O2_group
 !!    @param[in]        gs      = size of groups (along Z direction)
 subroutine advecZ_calc_O4(dt,Vz,scal3D,gs)
 
-    use advec_common    ! some procedures common to advection along all directions
     use advec_variables ! contains info about solver parameters and others.
     use cart_topology   ! Description of mesh and of mpi topology
 
@@ -297,7 +295,6 @@ end subroutine advecZ_calc_O4
 !!    @param[in]        gs      = size of groups (along Z direction)
 subroutine advecZ_calc_Mprime6(dt,Vz,scal3D,gs)
 
-    use advec_common    ! some procedures common to advection along all directions
     use advec_variables ! contains info about solver parameters and others.
     use cart_topology   ! Description of mesh and of mpi topology
 
@@ -353,7 +350,6 @@ end subroutine advecZ_calc_Mprime6
 !!    @param[in,out]    scal        = scalar field to advect
 subroutine Zremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,i,j,scal)
 
-    use advec_common            ! Some procedures common to advection along all directions
     use advec_remeshing_formula ! Remeshing formula
     use advec_variables         ! contains info about solver parameters and others.
     use cart_topology           ! Description of mesh and of mpi topology
@@ -422,7 +418,6 @@ end subroutine Zremesh_O2
 !!    @param[in,out]    scal        = scalar field to advect
 subroutine Zremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag,i,j,scal)
 
-    use advec_common            ! Some procedures common to advection along all directions
     use advec_remeshing_formula ! Remeshing formula
     use advec_variables         ! contains info about solver parameters and others.
     use cart_topology           ! Description of mesh and of mpi topology
@@ -501,7 +496,6 @@ end subroutine Zremesh_O2_group
 !!    @param[in,out]    scal          = scalar field to advect
 subroutine Zremesh_O4(ind_group, gs, p_pos_adim, bl_type, bl_tag,i,j,scal)
 
-    use advec_common            ! Some procedures common to advection along all directions
     use advec_remeshing_formula ! Remeshing formula
     use advec_variables         ! contains info about solver parameters and others.
     use cart_topology           ! Description of mesh and of mpi topology
@@ -579,7 +573,6 @@ end subroutine Zremesh_O4
 !!    @param[in,out]    scal        = scalar field to advect
 subroutine Zremesh_Mprime6(ind_group, gs, p_pos_adim, i,j,scal)
 
-    use advec_common            ! Some procedures common to advection along all directions
     use advec_remeshing_formula ! Remeshing formula
     use advec_variables         ! contains info about solver parameters and others.
     use cart_topology           ! Description of mesh and of mpi topology
diff --git a/HySoP/src/scalesInterface/particles/advec_common.f90 b/HySoP/src/scalesInterface/particles/advec_common.f90
deleted file mode 100644
index 0368f38a8..000000000
--- a/HySoP/src/scalesInterface/particles/advec_common.f90
+++ /dev/null
@@ -1,2048 +0,0 @@
-!> @addtogroup part
-!! @{
-!------------------------------------------------------------------------------
-!
-! MODULE: advec_common
-!
-!
-! DESCRIPTION: 
-!> The module ``advec_common'' gather function and subroutines used to advec scalar
-!! which are not specific to a direction
-!! @details
-!! This module gathers functions and routines used to advec scalar which are not
-!! specific to a direction. This is a parallel implementation using MPI and 
-!! the cartesien topology it provides. It also contains the variables common to 
-!! the solver along each direction and other generic variables used for the
-!! advection based on the particle method.
-!! 
-!! Except for testing purpose, this module is not supposed to be used by the
-!! main code but only by the other advection module. More precisly, an final user
-!! must only used the generic "advec" module wich contain all the interface to
-!! solve the advection equation with the particle method, and to choose the
-!! remeshing formula, the dimensionnal splitting and everything else.
-!!
-!! The module "test_advec" can be used in order to validate the procedures
-!! embedded in this module.
-!
-!> @author
-!! Jean-Baptiste Lagaert, LEGI
-!
-!------------------------------------------------------------------------------
-
-module advec_common
-
-  use precision
-  use cart_topology
-  use advec_variables!, only : real_pter
-  use advec_common_line
-
-  implicit none
-
-  ! XXX Si passage au fortran 2003 : basculer toutes ces variables dans le module
-  ! advec (fichier advec.F90) et mettre toutes les variables en protected.
-  ! Seul la procédure "advec_init" doit pouvoir les modifier, mais de nombreuses
-  ! procédures doivent pouvoir y accéder.
-
-
-  ! Information about the particles and their bloc
-  public
-
-
-  ! ===== Public procedures =====
-  !----- Determine block type and tag particles -----
-  public                              :: AC_type_and_block
-  public                              :: AC_type_and_block_group
-  !----- To interpolate velocity -----
-  public                              :: AC_obtain_receivers
-  public                              :: AC_particle_velocity
-  private                             :: AC_velocity_interpol_group
-  !----- To remesh particles -----
-  public                              :: AC_obtain_senders
-  private                             :: AC_obtain_senders_group
-  private                             :: AC_obtain_senders_com
-  public                              :: AC_bufferToScalar
-  
-  interface AC_particle_velocity
-     module procedure AC_particle_velocity_line, AC_velocity_interpol_group
-  end interface AC_particle_velocity
-
-  interface AC_type_and_block
-     module procedure  AC_type_and_block_line, AC_type_and_block_group
-  end interface AC_type_and_block
-
-  !> Determine the set of processes wich will send me information during the
-  !!  scalar remeshing 
-  interface AC_obtain_senders
-     module procedure AC_obtain_senders_line, AC_obtain_senders_com, &
-          & AC_obtain_senders_group
-  end interface AC_obtain_senders
-  
-  interface AC_bufferToScalar
-     module procedure AC_bufferToScalar_line
-  end interface AC_bufferToScalar
-
-
-contains
-
-  ! ===== Public procedure =====
-
-
-  ! ==================================================================================
-  ! ====================     Compute particle velocity (RK2)      ====================
-  ! ==================================================================================
-
-
-  !> Interpolate the velocity field used in a RK2 scheme for particle advection -
-  !! version for a group of (more of one) line
-  !!    @param[in]        dt          = time step
-  !!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
-  !!    @param[in]        gs          = size of a group (ie number of line it gathers along the two other directions)
-  !!    @param[in]        ind_group   = coordinate of the current group of lines
-  !!    @param[in]        p_pos_adim  = adimensionned particle postion
-  !!    @param[in,out]    p_V         = particle velocity (along the current direction)
-  !! @details
-  !!    A RK2 scheme is used to advect the particles : the midlle point scheme. An
-  !!    intermediary position "p_pos_bis(i) = p_pos(i) + V(i)*dt/2" is computed and then 
-  !!    the numerical velocity of each particles is computed as the interpolation of V  in 
-  !!    this point. This field is used to advect the particles at the seconde order in time : 
-  !!    p_pos(t+dt, i) = p_pos(i) + p_V(i).
-  !!    The group line indice is used to ensure using unicity of each mpi message tag.
-  !!    The interpolation is done for a group of lines, allowing to mutualise
-  !!    communications. Considering a group of Na X Nb lines, communication performed 
-  !!    by this algorithm are around (Na x Nb) bigger than the alogorithm wich
-  !!    works on a single line but also around (Na x Nb) less frequent.
-  subroutine AC_velocity_interpol_group(dt, direction, gs, ind_group, p_pos_adim, p_V)
-
-    ! This code involve a recopy of p_V. It is possible to directly use the 3D velocity field but in a such code
-    ! a memory copy is still needed to send velocity field to other processus : mpi send contiguous memory values
-
-    ! Input/Ouput
-    real(WP), intent(in)                            :: dt           ! time step
-    integer, intent(in)                             :: direction    ! current direction
-    integer, dimension(2),intent(in)                :: gs           ! groupe size
-    integer, dimension(2), intent(in)               :: ind_group
-    real(WP), dimension(:,:,:), intent(in)          :: p_pos_adim
-    real(WP), dimension(:,:,:), intent(inout)       :: p_V
-    ! Others, local
-    real(WP),dimension(N_proc(direction),gs(1),gs(2))           :: p_pos_bis    ! adimensionned position of the middle point
-    real(WP), dimension(N_proc(direction),gs(1),gs(2)),target   :: p_V_bis      ! velocity of the middle point
-    real(WP), dimension(N_proc(direction),gs(1),gs(2))          :: weight       ! interpolation weight
-    type(real_pter),dimension(N_proc(direction),gs(1),gs(2))    :: Vp, Vm       ! Velocity on previous and next mesh point
-    real(WP), dimension(:), allocatable, target                 :: V_buffer     ! Velocity buffer for postion outside of the local subdomain
-    integer, dimension(:), allocatable                          :: pos_in_buffer! buffer size
-    integer , dimension(gs(1), gs(2))           :: rece_ind_min ! minimal indice of mesh involved in remeshing particles (of my local subdomains)
-    integer , dimension(gs(1), gs(2))           :: rece_ind_max ! maximal indice of mesh involved in remeshing particles (of my local subdomains)
-    integer                                     :: ind, ind_com ! indices
-    integer                                     :: i1, i2       ! indices in the lines group
-    integer                                     :: pos, pos_old ! indices of the mesh point wich preceed the particle position
-    integer                                     :: proc_gap, gap! distance between my (mpi) coordonate and coordinate of the 
-    ! processus associated to a given position
-    integer, dimension(:), allocatable          :: rece_rank    ! rank of processus wich send me information
-    integer, dimension(:), allocatable          :: send_rank    ! rank of processus to wich I send information
-    integer                                     :: rankP        ! rank of processus ("source rank" returned by mpi_cart_shift)
-    integer, dimension(:), allocatable          :: send_carto   ! cartogrpahy of what I have to send
-    integer                                     :: ind_1Dtable  ! indice of my current position inside a one-dimensionnal table
-    integer                                     :: ind_for_i1   ! where to read the first coordinate (i1) of the current line inside the cartography ?
-    real(WP), dimension(:), allocatable         :: send_buffer  ! to store what I have to send (on a contiguous way)
-    integer, dimension(gs(1),gs(2),2)           :: rece_gap     ! distance between me and processus wich send me information
-    integer, dimension(2 , 2)                   :: send_gap     ! distance between me and processus to wich I send information
-    integer, dimension(2)                       :: rece_gap_abs ! min (resp max) value of rece_gap(:,:,i) with i=1 (resp 2)
-    integer                                     :: com_size     ! size of message send/receive
-    integer                                     :: min_size     ! minimal size of cartography(:,proc_gap)
-    integer                                     :: max_size     ! maximal size of cartography(:,proc_gap)
-    integer                                     :: tag          ! mpi message tag
-    integer                                     :: ierr         ! mpi error code
-    integer, dimension(:), allocatable          :: s_request    ! mpi communication request (handle) of nonblocking send
-    integer, dimension(:), allocatable          :: s_request_bis! mpi communication request (handle) of nonblocking send
-    integer, dimension(:), allocatable          :: rece_request ! mpi communication request (handle) of nonblocking receive
-    integer, dimension(MPI_STATUS_SIZE)         :: rece_status  ! mpi status (for mpi_wait)
-    integer, dimension(:,:), allocatable        :: cartography  ! cartography(proc_gap) contains the set of the lines indice in the block for wich the 
-    ! current processus requiers data from proc_gap and for each of these lines the range 
-    ! of mesh points from where it requiers the velocity values.
-
-    ! -- Initialisation --
-    do i2 = 1, gs(2) 
-       do i1 = 1, gs(1) 
-          do ind = 1, N_proc(direction)
-             nullify(Vp(ind,i1,i2)%pter)
-             nullify(Vm(ind,i1,i2)%pter)
-          end do
-       end do
-    end do
-    ! Compute the midlle point
-    p_pos_bis = p_pos_adim + (dt/2.0)*p_V/d_sc(direction)
-    p_V_bis = p_V
-    ! XXX Todo / Optim
-    ! Ici recopie de la vitesse. On doit la faire car on calcule la vitesse
-    ! interpolée à partir d' "elle-même" : la variable calculée dépend de sa
-    ! précédente valeur en des points qui peuvent différé. Si on l'écrase au fur et 
-    ! à mesure on fait des calculs faux.
-    ! On pourrait utiliser directement le champ de vitesse V en entrée pour donner
-    ! p_V en sortie, mais cela pose un problème car selon les directions il faudrait
-    ! changer l'ordre des indices i, i1 et i2. Ce qui est un beua bordel !!
-    ! XXX
-    ! Compute range of the set of point where I need the velocity value
-    rece_ind_min = floor(p_pos_bis(1,:,:))
-    rece_ind_max = floor(p_pos_bis(N_proc(direction),:,:)) + 1
-
-    ! ===== Exchange velocity field if needed =====
-    ! It uses non blocking message to do the computations during the communication process
-    ! -- What have I to communicate ? --
-    rece_gap(:,:,1) = floor(real(rece_ind_min-1)/N_proc(direction))
-    rece_gap(:,:,2) = floor(real(rece_ind_max-1)/N_proc(direction))
-    rece_gap_abs(1) = minval(rece_gap(:,:,1))
-    rece_gap_abs(2) = maxval(rece_gap(:,:,2))
-    max_size = 2 + gs(2)*(2+3*gs(1))
-    allocate(cartography(max_size,rece_gap_abs(1):rece_gap_abs(2)))
-    allocate(rece_rank(rece_gap_abs(1):rece_gap_abs(2)))
-    call AC_velocity_determine_communication(direction, ind_group, gs, send_gap,  &
-         & rece_gap, rece_gap_abs, rece_rank, cartography)
-
-    ! -- Send messages about what I want --
-    allocate(s_request_bis(rece_gap_abs(1):rece_gap_abs(2)))
-    min_size = 2 + gs(2)
-    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
-       if (rece_rank(proc_gap) /= D_rank(direction)) then
-          cartography(1,proc_gap) = 0
-          ! Use the cartography to know which lines are concerned
-          com_size = cartography(2,proc_gap)
-          ! Range I want - store into the cartography
-          gap = proc_gap*N_proc(direction)
-          ! Position in cartography(:,proc_gap) of the current i1 indice
-          ind_for_i1 = min_size
-          do i2 = 1, gs(2)
-             do ind = ind_for_i1+1, ind_for_i1 + cartography(2+i2,proc_gap), 2
-                do i1 = cartography(ind,proc_gap), cartography(ind+1,proc_gap)
-                   ! Interval start from:
-                   cartography(com_size+1,proc_gap) = max(rece_ind_min(i1,i2), gap+1) ! fortran => indice start from 0
-                   ! and ends at:
-                   cartography(com_size+2,proc_gap) = min(rece_ind_max(i1,i2), gap+N_proc(direction))
-                   ! update number of element to receive
-                   cartography(1,proc_gap) = cartography(1,proc_gap) &
-                        & + cartography(com_size+2,proc_gap) &
-                        & - cartography(com_size+1,proc_gap) + 1
-                   com_size = com_size+2
-                end do
-             end do
-             ind_for_i1 = ind_for_i1 + cartography(2+i2,proc_gap)
-          end do
-          ! Tag = concatenation of (rank+1), ind_group(1), ind_group(2), direction et unique Id.                          
-          tag = compute_tag(ind_group, tag_bufToScal_range, direction, proc_gap)
-          ! Send message
-          call mpi_Isend(cartography(1,proc_gap), com_size, MPI_INTEGER, send_rank(proc_gap), tag, &
-               D_comm(direction), s_request_bis(proc_gap),ierr)
-       end if
-    end do
-
-    ! -- Send the velocity field to processus which need it --
-    allocate(s_request(send_gap(1,1):send_gap(1,2)))
-    allocate(send_rank(send_gap(1,1):send_gap(1,2)))
-    allocate(send_carto(max_size))
-    do proc_gap = send_gap(1,1), send_gap(1,2)
-       call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank(proc_gap), ierr)
-       if (send_rank(proc_gap) /= D_rank(direction)) then
-          ! I - Receive messages about what I have to send
-          ! Ia - Compute reception tag = concatenation of (rank+1), ind_group(1), ind_group(2), direction et unique Id.             
-          tag = compute_tag(ind_group, tag_velo_range, direction, -proc_gap)
-          ! Ib - Receive the message
-          call mpi_recv(send_carto(1), max_size, MPI_INTEGER, send_rank(proc_gap), tag, D_comm(direction), rece_status, ierr)
-          ! II - Send it
-          ! IIa - Create send buffer
-          allocate(send_buffer(send_carto(1)))
-          gap = proc_gap*N_proc(direction)
-          com_size = 0
-          ind_1Dtable = send_carto(2)
-          ! Position in cartography(:,proc_gap) of the current i1 indice
-          ind_for_i1 = min_size
-          do i2 = 1, gs(2)
-             do ind = ind_for_i1+1, ind_for_i1 + send_carto(2+i2), 2
-                do i1 = send_carto(ind), send_carto(ind+1)
-                   do ind_com = send_carto(ind_1Dtable+1)+gap, send_carto(ind_1Dtable+2)+gap ! indice inside the current line
-                      com_size = com_size + 1
-                      send_buffer(com_size) = p_V(ind_com, i1,i2)
-                   end do
-                   ind_1Dtable = ind_1Dtable + 2
-                end do
-             end do
-             ind_for_i1 = ind_for_i1 + send_carto(2+i2)
-          end do
-          ! IIb - Compute send tag
-          tag = compute_tag(ind_group, tag_velo_V, direction, proc_gap)
-          ! IIc - Send message
-          call mpi_Isend(send_buffer(1), com_size, MPI_DOUBLE_PRECISION, &
-               & send_rank(proc_gap), tag, D_comm(direction), s_request(proc_gap), ierr)
-          deallocate(send_buffer)
-       end if
-    end do
-    deallocate(send_carto)
-
-    ! -- Non blocking reception of the velocity field --
-    ! Allocate the pos_in_buffer to compute V_buffer size and to be able to
-    ! allocate it.
-    allocate(pos_in_buffer(rece_gap_abs(1):rece_gap_abs(2)))
-    pos_in_buffer(rece_gap_abs(1)) = 1
-    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)-1
-       pos_in_buffer(proc_gap+1)= pos_in_buffer(proc_gap) + cartography(1,proc_gap)
-    end do
-    allocate(V_buffer(pos_in_buffer(rece_gap_abs(2)) &
-         & + cartography(1,rece_gap_abs(2))))
-    V_buffer = 0
-    allocate(rece_request(rece_gap_abs(1):rece_gap_abs(2)))
-    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
-       if (rece_rank(proc_gap) /= D_rank(direction)) then
-          ! IIa - Compute reception tag
-          tag = compute_tag(ind_group, tag_velo_V, direction, -proc_gap)
-          ! IIb - Receive message
-          call mpi_Irecv(V_buffer(pos_in_buffer(proc_gap)), cartography(1,proc_gap), MPI_DOUBLE_PRECISION, &
-               & rece_rank(proc_gap), tag, D_comm(direction), rece_request(proc_gap), ierr) 
-       end if
-    end do
-    deallocate(cartography) ! We do not need it anymore
-
-    ! ===== Compute the interpolated velocity =====
-    ! -- Compute the interpolation weight and update the pointers Vp and Vm --
-    do i2 = 1, gs(2)
-       do i1 = 1, gs(1)
-          ! Initialisation of reccurence process
-          ind = 1
-          pos = floor(p_pos_bis(ind,i1,i2))
-          weight(ind,i1,i2) = p_pos_bis(ind,i1,i2)-pos
-          ! Vm = V(pos)
-          proc_gap = floor(real(pos-1)/N_proc(direction)) 
-          if (rece_rank(proc_gap) == D_rank(direction)) then
-             Vm(ind,i1,i2)%pter => p_V_bis(pos-proc_gap*N_proc(direction), i1,i2)
-          else 
-             Vm(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap))
-             pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
-          end if
-          ! Vp = V(pos+1)
-          proc_gap = floor(real(pos+1-1)/N_proc(direction)) 
-          if (rece_rank(proc_gap) == D_rank(direction)) then
-             Vp(ind,i1,i2)%pter => p_V_bis(pos+1-proc_gap*N_proc(direction), i1,i2)
-          else 
-             Vp(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap))
-             pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
-          end if
-          pos_old = pos
-
-          ! Following indice : we use previous work (already done)
-          do ind = 2, N_proc(direction)
-             pos = floor(p_pos_bis(ind,i1,i2))
-             weight(ind,i1,i2) = p_pos_bis(ind,i1,i2)-pos
-             select case(pos-pos_old)
-             case(0)
-                ! The particle belongs to the same segment than the previous one
-                Vm(ind,i1,i2)%pter => Vm(ind-1,i1,i2)%pter
-                Vp(ind,i1,i2)%pter => Vp(ind-1,i1,i2)%pter
-             case(1)
-                ! The particle follows the previous one
-                Vm(ind,i1,i2)%pter => Vp(ind-1,i1,i2)%pter
-                ! Vp = V(pos+1)
-                proc_gap = floor(real(pos+1-1)/N_proc(direction)) ! fortran -> indice starts from 1
-                if (rece_rank(proc_gap) == D_rank(direction)) then
-                   Vp(ind,i1,i2)%pter => p_V_bis(pos+1-proc_gap*N_proc(direction), i1,i2)
-                else 
-                   Vp(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap))
-                   pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
-                end if
-             case(2)
-                ! pos = pos_old +2, wich correspond to "extention"
-                ! Vm = V(pos)
-                proc_gap = floor(real(pos-1)/N_proc(direction)) 
-                if (rece_rank(proc_gap) == D_rank(direction)) then
-                   Vm(ind,i1,i2)%pter => p_V_bis(pos-proc_gap*N_proc(direction), i1,i2)
-                else 
-                   Vm(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap))
-                   pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
-                end if
-                ! Vp = V(pos+1)
-                proc_gap = floor(real(pos+1-1)/N_proc(direction)) 
-                if (rece_rank(proc_gap) == D_rank(direction)) then
-                   Vp(ind,i1,i2)%pter => p_V_bis(pos+1-proc_gap*N_proc(direction), i1,i2)
-                else 
-                   Vp(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap))
-                   pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
-                end if
-             case default
-                print*, "unexpected case : pos = ", pos, " , pos_old = ", pos_old, &
-                     & " ind = ", ind, " i1 = ", i1, " i2 = ", i2
-             end select
-             pos_old = pos
-          end do ! loop on particle indice inside the current line
-       end do ! loop on first coordinate (i1) of a line inside the block of line
-    end do ! loop on second coordinate (i2) of a line inside the block of line
-    deallocate(pos_in_buffer)   ! We do not need it anymore
-
-    ! -- Compute the interpolate velocity --
-    ! Check if communication are done
-    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
-       if (rece_rank(proc_gap)/=D_rank(direction)) then
-          call mpi_wait(rece_request(proc_gap), rece_status, ierr)
-       end if
-    end do
-    deallocate(rece_request)
-
-    ! Then compute the field
-    do i2 = 1, gs(2)
-       do i1 = 1, gs(1)
-          do ind = 1, N_proc(direction)
-             p_V(ind,i1,i2) = weight(ind,i1,i2)*Vp(ind,i1,i2)%pter + (1.-weight(ind,i1,i2))*Vm(ind,i1,i2)%pter
-          end do
-       end do
-    end do
-
-
-    ! ===== Free memory =====
-    ! -- Pointeur --
-    do i2 = 1, gs(2)
-       do i1 = 1, gs(1)
-          do ind = 1, N_proc(direction)
-             nullify(Vp(ind,i1,i2)%pter)
-             nullify(Vm(ind,i1,i2)%pter)
-          end do
-       end do
-    end do
-    ! -- Mpi internal buffer for non blocking communication --
-    do proc_gap = send_gap(1,1), send_gap(1,2)
-       if (send_rank(proc_gap) /= D_rank(direction)) then
-          call MPI_WAIT(s_request(proc_gap),rece_status,ierr)
-       end if
-    end do
-    deallocate(s_request)
-    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
-       if (rece_rank(proc_gap) /= D_rank(direction)) then
-          call MPI_WAIT(s_request_bis(proc_gap),rece_status,ierr)
-       end if
-    end do
-    deallocate(s_request_bis)
-    ! -- Deallocate dynamic array --
-    deallocate(V_buffer)
-    deallocate(rece_rank)
-    deallocate(send_rank)
-
-  end subroutine AC_velocity_interpol_group
-
-  !> Determine the set of processes wich will send me information during the velocity interpolation and compute 
-  !! for each of these processes the range of wanted data.
-  !!    @param[in]    direction       = current direction (1 = along X, 2 = along Y, 3 = along Z)
-  !!    @param[in]    ind_group       = coordinate of the current group of lines
-  !!    @param[out]   send_gap        = gap between my coordinate and the processes of minimal coordinate which will send information to me
-  !!    @param[in]    rece_gap        = gap between my coordinate and the processes of maximal coordinate which will receive information from me
-  !!    @param[in]    rece_gap_abs    = min (resp max) value of rece_gap(:,:,i) with i=1 (resp 2)
-  !!    @param[out]   cartography     = cartography(proc_gap) contains the set of the lines indice in the block for wich the 
-  !!                                    current processus requiers data from proc_gap and for each of these lines the range 
-  !!                                    of mesh points from where it requiers the velocity values.
-  !! @details
-  !!    Work on a group of line of size gs(1) x gs(2))
-  !!    Obtain the list of processus wich need a part of my local velocity field
-  !!    to interpolate the velocity used in the RK2 scheme to advect its particles.
-  !!    In the same time, it computes for each processus from which I need a part
-  !!    of the velocity field, the range of mesh point where I want data and store it 
-  !!    by using some sparse matrix technics (see cartography defined in the
-  !!    algorithm documentation)
-  subroutine AC_velocity_determine_communication(direction, ind_group, gs, send_gap,  &
-       & rece_gap, rece_gap_abs, rece_rank, cartography)
-    ! XXX Work only for periodic condition.
-
-    ! Input/Ouput
-    integer, intent(in)                                 :: direction
-    integer, dimension(2), intent(in)                   :: ind_group
-    integer, dimension(2), intent(in)                   :: gs
-    integer, dimension(gs(1), gs(2), 2), intent(in)     :: rece_gap
-    integer, dimension(2), intent(in)                   :: rece_gap_abs ! min (resp max) value of rece_gap(:,:,i) with i=1 (resp 2)
-    integer,dimension(rece_gap_abs(1):rece_gap_abs(2))&
-         &, intent(out)                                  :: rece_rank    ! rank of processus wich send me information
-    integer, dimension(2, 2), intent(out)               :: send_gap
-    integer, dimension(2+gs(2)*(2+3*gs(1)), &
-         & rece_gap_abs(1):rece_gap_abs(2)), intent(out) :: cartography
-    ! Others 
-    integer                             :: proc_gap         ! gap between a processus coordinate (along the current 
-    ! direction) into the mpi-topology and my coordinate
-    integer, dimension(gs(1), gs(2))    :: rece_gapP        ! gap between the coordinate of the previous processus (in the current direction)
-    ! and the processes of maximal coordinate which will receive information from it
-    integer, dimension(gs(1), gs(2))    :: rece_gapN        ! same as above but for the next processus
-    integer                             :: rankP            ! processus rank for shift (P= previous, N = next)
-    integer                             :: send_request_gh  ! mpi status of noindicelocking send
-    integer                             :: send_request_gh2 ! mpi status of noindicelocking send
-    integer                             :: ierr             ! mpi error code
-    integer, dimension(2)               :: tag_table        ! some mpi message tag
-    logical, dimension(:,:), allocatable:: test_request     ! for mpi non blocking communication
-    integer, dimension(:,:), allocatable:: send_request     ! for mpi non blocking send
-    integer                             :: ind1, ind2       ! indice of the current line inside the group
-    integer,dimension(2)                :: rece_buffer      ! buffer for reception of rece_max
-    integer, dimension(:,:), allocatable:: first, last      ! Storage processus to which I will be the first (or the last) to receive
-    integer                             :: min_size         ! begin indice in first and last to stock indice along first dimension of the group line
-    integer                             :: gp_size          ! group size
-    logical                             :: begin_interval   ! ware we in the start of an interval ?
-    logical                             :: not_myself       ! Is the target processus myself ?
-    integer, dimension(MPI_STATUS_SIZE) :: statut
-
-    send_gap(1,1) = 3*N(direction)
-    send_gap(1,2) = -3*N(direction)
-    send_gap(2,:) = 0
-    gp_size = gs(1)*gs(2)
-
-    ! ===== Communicate with my neigbors -> obtain ghost ! ====
-    ! Inform that about processus from which I need information
-    tag_table = compute_tag(ind_group, tag_obtrec_ghost_NP, direction)
-    call mpi_Isend(rece_gap(1,1,1), gp_size, MPI_INTEGER, neighbors(direction,1), tag_table(1), &
-         & D_comm(direction), send_request_gh, ierr)
-    call mpi_Isend(rece_gap(1,1,2), gp_size, MPI_INTEGER, neighbors(direction,2), tag_table(2), &
-         & D_comm(direction), send_request_gh2, ierr)
-    ! Receive the same message form my neighbors
-    call mpi_recv(rece_gapN(1,1), gp_size, MPI_INTEGER, neighbors(direction,2), tag_table(1), D_comm(direction), statut, ierr)
-    call mpi_recv(rece_gapP(1,1), gp_size, MPI_INTEGER, neighbors(direction,1), tag_table(2), D_comm(direction), statut, ierr)
-
-    ! ===== Compute if I am first or last and determine the carography =====
-    min_size = 2 + gs(2)
-    ! Initialize first and last to determine if I am the the first or the last processes (considering the current direction) 
-    ! to require information from this processus
-    allocate(first(2,rece_gap_abs(1):rece_gap_abs(2)))
-    first(2,:) = 0  ! number of lines for which I am the first
-    allocate(last(2,rece_gap_abs(1):rece_gap_abs(2)))
-    last(2,:) = 0   ! number of lines for which I am the last
-    ! Initialize cartography
-    cartography(1,:) = 0            ! number of velocity values to receive
-    cartography(2,:) = min_size     ! number of element to send when sending cartography
-    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
-       first(1,proc_gap) = -proc_gap
-       last(1,proc_gap) = -proc_gap
-       call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rece_rank(proc_gap), ierr)
-       not_myself = (rece_rank(proc_gap) /= D_rank(direction)) ! Is the target processus myself ?
-       do ind2 = 1, gs(2)
-          cartography(2+ind2,proc_gap) = 0    ! 2 x number of interval of concern line into the column i2
-          begin_interval = .true.
-          do ind1 = 1, gs(1)
-             ! Does proc_gap belongs to [rece_gap(i1,i2,1);rece_gap(i1,i2,2)]?
-             if((proc_gap>=rece_gap(ind1,ind2,1)).and.(proc_gap<=rece_gap(ind1,ind2,2))) then
-                ! Compute if I am the first.
-                if (proc_gap>rece_gapP(ind1,ind2)-1) then
-                   first(2,proc_gap) =  first(2,proc_gap)+1
-                end if
-                ! Compute if I am the last.
-                if (proc_gap<rece_gapN(ind1,ind2)+1) then
-                   last(2,proc_gap) =  last(2,proc_gap)+1
-                end if
-                ! Update cartography // Not need I target processus is myself
-                if (not_myself) then
-                   if (begin_interval) then 
-                      cartography(2+ind2,proc_gap) =  cartography(2+ind2,proc_gap)+2
-                      cartography(cartography(2,proc_gap)+1,proc_gap) = ind1
-                      cartography(2,proc_gap) = cartography(2,proc_gap) + 2
-                      cartography(cartography(2,proc_gap),proc_gap) = ind1
-                      begin_interval = .false.
-                   else
-                      cartography(cartography(2,proc_gap),proc_gap) = ind1
-                   end if
-                end if
-             else
-                begin_interval = .true.
-             end if
-          end do
-       end do
-    end do
-
-    ! ===== Send information about first and last  =====
-    tag_table = compute_tag(ind_group, tag_obtrec_NP, direction)
-    allocate(send_request(rece_gap_abs(1):rece_gap_abs(2),2))
-    allocate(test_request(rece_gap_abs(1):rece_gap_abs(2),2))
-    test_request = .false.
-    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
-       ! I am the first ?
-       if (first(2,proc_gap)>0) then
-          if(rece_rank(proc_gap)/= D_rank(direction)) then
-             call mpi_Isend(first(1,proc_gap), 2, MPI_INTEGER, rece_rank(proc_gap), tag_table(1), D_comm(direction), &
-                  & send_request(proc_gap,1), ierr)
-             test_request(proc_gap,1) = .true.
-          else
-             send_gap(1,1) = min(send_gap(1,1), -proc_gap)
-             send_gap(2,1) = send_gap(2,1) + first(2,proc_gap)
-          end if
-       end if
-       ! I am the last ?
-       if (last(2,proc_gap)>0) then
-          if(rece_rank(proc_gap)/= D_rank(direction)) then
-             call mpi_Isend(last(1,proc_gap), 2, MPI_INTEGER, rece_rank(proc_gap), tag_table(2), D_comm(direction), &
-                  & send_request(proc_gap,2), ierr)
-             test_request(proc_gap,2) = .true.
-          else
-             send_gap(1,2) = max(send_gap(1,2), -proc_gap)
-             send_gap(2,2) = send_gap(2,2) + last(2,proc_gap)
-          end if
-       end if
-    end do
-
-
-
-    ! ===== Receive information form the first and the last processus which need a part of my local velocity field =====
-    do while(send_gap(2,1) < gp_size)
-       call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
-       send_gap(1,1) = min(send_gap(1,1), rece_buffer(1))
-       send_gap(2,1) = send_gap(2,1) + rece_buffer(2)
-    end do
-    do while(send_gap(2,2) < gp_size)
-       call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
-       send_gap(1,2) = max(send_gap(1,2), rece_buffer(1))
-       send_gap(2,2) = send_gap(2,2) + rece_buffer(2)
-    end do
-
-    ! ===== Free Isend buffer =====
-    call MPI_WAIT(send_request_gh,statut,ierr)
-    call MPI_WAIT(send_request_gh2,statut,ierr)
-    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
-       if (test_request(proc_gap,1).eqv. .true.) call MPI_WAIT(send_request(proc_gap,1),statut,ierr)
-       if (test_request(proc_gap,2)) call MPI_WAIT(send_request(proc_gap,2),statut,ierr)
-    end do
-    deallocate(send_request)
-    deallocate(test_request)
-
-  end subroutine AC_velocity_determine_communication
-
-
-  ! ===================================================================================================
-  ! ====================     Others than velocity interpolation and remeshing      ====================
-  ! ===================================================================================================
-
-  !> Determine type (center or left) of each block and tagfor a complete group of
-  !! lines.
-  !! corrected remeshing formula are recquired.
-  !!    @param[in]        dt          = time step
-  !!    @param[in]        dir         = current direction (1 = along X, 2 = along Y and 3 = along Z)
-  !!    @param[in]        gp_s        = size of a group (ie number of line it gathers along the two other directions)
-  !!    @param[in]        ind_group   = coordinate of the current group of lines
-  !!    @param[in]        p_V         = particle velocity (along the current direction)
-  !!    @param[out]       bl_type     = table of blocks type (center of left)
-  !!    @param[out]       bl_tag      = inform about tagged particles (bl_tag(ind_bl)=1 if the end of the bl_ind-th block 
-  !!                                    and the begining of the following one is tagged)
-  !! @details
-  !!        This subroutine work on a groupe of line. For each line of this group, it
-  !!    determine the type of each block of this line and where corrected remeshing
-  !!    formula are required. In those points, it tagg block transition (ie the end of
-  !!    the current block and the beginning of the following one) in order to indicate
-  !!    that corrected weigth have to be used during the remeshing.
-  subroutine AC_type_and_block_group(dt, dir, gp_s, ind_group, p_V,bl_type, bl_tag)
-
-    real(WP), intent(in)                                        :: dt           ! time step
-    integer, intent(in)                                         :: dir
-    integer, dimension(2),intent(in)                            :: gp_s         ! groupe size
-    integer, dimension(2), intent(in)                           :: ind_group    ! group indice
-    real(WP), dimension(:,:,:), intent(in)                      :: p_V
-    logical,dimension(bl_nb(dir)+1,gp_s(1),gp_s(2)),intent(out) :: bl_type      ! is the particle block a center block or a left one ?
-    logical,dimension(bl_nb(dir),gp_s(1),gp_s(2)),intent(out)   :: bl_tag       ! indice of tagged particles
-
-    real(WP),dimension(bl_nb(dir)+1,gp_s(1),gp_s(2))            :: bl_lambdaMin ! for a particle, lamda = V*dt/dx ;  bl_lambdaMin = min of 
-    ! lambda on a block (take also into account first following particle)
-    real(WP),dimension(gp_s(1),gp_s(2))                         :: lambP, lambN ! buffer to exchange some lambda min with other processus
-    real(WP),dimension(gp_s(1),gp_s(2))                         :: lambB, lambE ! min value of lambda of the begin of the line and at the end of the line
-    integer, dimension(bl_nb(dir)+1,gp_s(1),gp_s(2))            :: bl_ind       ! block index : integer as lambda in (bl_ind,bl_ind+1) for a left block
-    ! and lambda in (bl_ind-1/2, bl_ind+1/2) for a right block 
-    integer                                                     :: ind,i_p      ! some indices
-    real(WP)                                                    :: cfl          ! = d_sc
-    integer, dimension(2)                                       :: send_request ! mpi status of nonblocking send
-    integer, dimension(2)                                       :: rece_request ! mpi status of nonblocking receive
-    integer, dimension(MPI_STATUS_SIZE)                         :: rece_status  ! mpi status (for mpi_wait)
-    integer, dimension(MPI_STATUS_SIZE)                         :: send_status  ! mpi status (for mpi_wait)
-    integer, dimension(2)                                       :: tag_table    ! other tags for mpi message
-    integer                                                     :: ierr         ! mpi error code
-
-    ! ===== Initialisation =====
-    cfl = dt/d_sc(dir)
-
-    ! ===== Compute bl_lambdaMin =====
-
-    ! -- For the first block (1/2) --
-    ! The domain contains only its second half => exchange ghost with the previous processus
-    lambB = minval(p_V(1:(bl_size/2)+1,:,:),1)*cfl
-    tag_table = compute_tag(ind_group, tag_part_tag_NP, dir)
-    ! Send message
-    call mpi_Isend(lambB(1,1), gp_s(1)*gp_s(2), MPI_DOUBLE_PRECISION, &
-         & neighbors(dir,1), tag_table(1), D_comm(dir), send_request(1), ierr)
-    ! Receive it
-    call mpi_Irecv(lambN(1,1), gp_s(1)*gp_s(2), MPI_DOUBLE_PRECISION, &
-         & neighbors(dir,2), tag_table(1), D_comm(dir), rece_request(1), ierr) 
-
-    ! -- For the last block (1/2) --
-    ! The processus contains only its first half => exchange ghost with the next processus
-    ind = bl_nb(dir) + 1
-    lambE = minval(p_V(N_proc(dir) - (bl_size/2)+1 :N_proc(dir),:,:),1)*cfl
-    ! Send message
-    call mpi_Isend(lambE(1,1), gp_s(1)*gp_s(2), MPI_DOUBLE_PRECISION, &
-         & neighbors(dir,2), tag_table(2), D_comm(dir), send_request(2), ierr)
-    ! Receive it
-    call mpi_Irecv(lambP(1,1), gp_s(1)*gp_s(2), MPI_DOUBLE_PRECISION, &
-         &  neighbors(dir,1), tag_table(2), D_comm(dir), rece_request(2), ierr) 
-
-    ! -- For the "middle" block --
-    do ind = 2, bl_nb(dir)
-       i_p = ((ind-1)*bl_size) + 1 - bl_size/2
-       bl_lambdaMin(ind,:,:) = minval(p_V(i_p:i_p+bl_size,:,:),1)*cfl
-    end do
-
-    ! -- For the first block (1/2) --
-    ! The domain contains only its second half => use exchanged ghost
-    ! Check reception
-    call mpi_wait(rece_request(2), rece_status, ierr)
-    bl_lambdaMin(1,:,:) = min(lambB(:,:), lambP(:,:))
-
-    ! -- For the last block (1/2) --
-    ! The processus contains only its first half => use exchanged ghost
-    ! Check reception
-    call mpi_wait(rece_request(1), rece_status, ierr)
-    ind = bl_nb(dir) + 1
-    bl_lambdaMin(ind,:,:) = min(lambE(:,:), lambN(:,:))
-
-    ! ===== Compute block type and index =====
-    bl_ind = nint(bl_lambdaMin)
-    bl_type = (bl_lambdaMin<dble(bl_ind))
-
-    ! ===== Tag particles =====
-
-    do ind = 1, bl_nb(dir)
-       bl_tag(ind,:,:) = ((bl_ind(ind,:,:)/=bl_ind(ind+1,:,:)) .and. &
-            & (bl_type(ind,:,:).neqv.bl_type(ind+1,:,:)))
-    end do
-
-    call mpi_wait(send_request(1), send_status, ierr)
-    call mpi_wait(send_request(2), send_status, ierr)
-
-  end subroutine AC_type_and_block_group
-
-  ! ============================================================================
-  ! ====================     Tools to remesh particles      ====================
-  ! ============================================================================
-
-  !> Determine the set of processes wich will send me information during the remeshing 
-  !! and compute for each of these processes the range of wanted data. Use implicit 
-  !! computation rather than communication (only possible if particle are gather by 
-  !! block whith contrainst on velocity variation - as corrected lambda formula.) -
-  !! work directly on a group of particles lines.
-  !     @param[in]    send_group_min  = minimal indice of mesh involved in remeshing particles (of the particles in my local subdomains)
-  !     @param[in]    send_group_max  = maximal indice of mesh involved in remeshing particles (of the particles in my local subdomains)
-  !!    @param[in]    direction       = current direction (1 = along X, 2 = along Y, 3 = along Z)
-  !!    @param[in]    ind_group       = coordinate of the current group of lines
-  !!    @param[out]   proc_min        = gap between my coordinate and the processes of minimal coordinate which will receive information from me
-  !!    @param[out]   proc_max        = gap between my coordinate and the processes of maximal coordinate which will receive information from me
-  !!    @param[out]   rece_gap        = coordinate range of processes which will send me information during the remeshing.
-  !!    @param[in]    gs              = size of group of line along the current direction
-  !! @details
-  !!    Work on a group of line of size gs(1) x gs(2))
-  !!    Obtain the list of processts which are associated to sub-domain where my partticles
-  !!    will be remeshed and the list of processes wich contains particles which
-  !!    have to be remeshed in my sub-domain. This way, this procedure determine
-  !!    which processus need to communicate together in order to proceed to the
-  !!    remeshing (as in a parrallel context the real space is subdivised and each
-  !!    processus contains a part of it)
-  !!        In the same time, it computes for each processus with which I will
-  !!    communicate, the range of mesh point involved for each line of particles 
-  !!    inside the group and it stores it by using some sparse matrix technics 
-  !!    (see cartography defined in the algorithm documentation)
-  !!        This routine does not involve any computation to determine if
-  !!    a processus is the first or the last processes (considering its coordinate along 
-  !!    the current directory) to send remeshing information to a given processes.
-  !!    It directly compute it using contraints on velocity (as in corrected lambda
-  !!    scheme) When possible use it rather than AC_obtain_senders_com
-  subroutine AC_remesh_determine_communication(direction, gs, ind_group, rece_gap, send_gap, send_gap_abs, send_rank, cartography)
-    ! XXX Work only for periodic condition. For dirichlet conditions : it is
-    ! possible to not receive either rece_gap(1), either rece_gap(2) or none of
-    ! these two => detect it (track the first and the last particles) and deal with it.
-
-    ! Input/output
-    integer, intent(in)                                 :: direction
-    integer, dimension(2), intent(in)                   :: gs           ! group size
-    integer, dimension(2), intent(in)                   :: ind_group
-    integer, dimension(2, 2), intent(out)               :: rece_gap
-    integer(kind=4), dimension(gs(2),gs(1),2)           :: send_gap     ! minimal and maximal processus which contains the sub-domains where my 
-    ! particles will be remeshed for each line of the line group
-    integer, dimension(2), intent(in)                   :: send_gap_abs ! min and maximal processus which contains the sub-domains where my particles will be remeshed.
-    integer,dimension(send_gap_abs(1):send_gap_abs(2))&
-         &, intent(out)                                  :: send_rank    ! rank of processus to wich I will send data
-    integer, dimension(2+gs(2)*(2+3*gs(1)), &
-         & send_gap_abs(1):send_gap_abs(2)), intent(out) :: cartography
-    ! Other local variable
-    integer(kind=4)                                 :: proc_gap         ! gap between a processus coordinate (along the current 
-    ! direction) into the mpi-topology and my coordinate
-    integer                                         :: rankP            ! processus rank for shift (P= previous, N = next)
-    integer, dimension(2)                           :: tag_table        ! mpi message tag (for communicate rece_gap(1) and rece_gap(2))
-    integer, dimension(:,:), allocatable            :: first, last      ! Storage processus to which I will be the first (or the last) to send
-    ! remeshed particles
-    integer, dimension(2)                           :: first_condition  ! allowed range of value of proc_min and proc_max for being the first
-    integer, dimension(2)                           :: last_condition   ! allowed range of value of proc_min and proc_max for being the last
-    integer, dimension(:,:),allocatable             :: send_request     ! mpi status of nonblocking send
-    integer                                         :: ierr             ! mpi error code
-    integer, dimension(MPI_STATUS_SIZE)             :: statut           ! mpi status
-    integer                                         :: ind1, ind2       ! indice of the current line inside the group
-    integer                                         :: min_size         ! begin indice in first and last to stock indice along first dimension of the group line
-    integer                                         :: gp_size          ! group size
-    integer,dimension(2)                            :: rece_buffer      ! buffer for reception of rece_max
-    logical                                         :: begin_interval   ! ware we in the start of an interval ?
-
-    rece_gap(1,1) = 3*N(direction)
-    rece_gap(1,2) = -3*N(direction)
-    rece_gap(2,:) = 0
-    gp_size = gs(1)*gs(2)
-
-    allocate(send_request(send_gap_abs(1):send_gap_abs(2),3))
-    send_request(:,3) = 0
-
-    ! ===== Compute if I am first or last and determine the carography =====
-    min_size = 2 + gs(2)
-    ! Initialize first and last to determine if I am the the first or the last processes (considering the current direction) 
-    ! to require information from this processus
-    allocate(first(2,send_gap_abs(1):send_gap_abs(2)))
-    first(2,:) = 0  ! number of lines for which I am the first
-    allocate(last(2,send_gap_abs(1):send_gap_abs(2)))
-    last(2,:) = 0   ! number of lines for which I am the last
-    ! Initialize cartography
-    cartography(1,:) = 0            ! number of velocity values to receive
-    cartography(2,:) = min_size     ! number of element to send when sending cartography
-    ! And compute cartography, first and last !
-    do proc_gap = send_gap_abs(1), send_gap_abs(2)
-       first(1,proc_gap) = -proc_gap
-       last(1,proc_gap) = -proc_gap
-       first_condition(2) = proc_gap*N_proc(direction)+1
-       first_condition(1) = 1-2*bl_bound_size + first_condition(2)
-       last_condition(2)  = (proc_gap+1)*N_proc(direction)
-       last_condition(1)  = -1+2*bl_bound_size + last_condition(2)
-       call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank(proc_gap), ierr)
-       do ind2 = 1, gs(2)
-          cartography(2+ind2,proc_gap) = 0    ! 2 x number of interval of concern line into the column i2
-          begin_interval = .true.
-          do ind1 = 1, gs(1)
-             ! Does proc_gap belongs to [send_gap(i1,i2,1);send_gap(i1,i2,2)]?
-             if((proc_gap>=send_gap(ind1,ind2,1)).and.(proc_gap<=send_gap(ind1,ind2,2))) then
-                ! Compute if I am the first.
-                if ((send_group_min(ind1,ind2)< first_condition(1)).AND. &
-                     & (send_group_max(ind1,ind2)>= first_condition(2))) then
-                   first(2,proc_gap) =  first(2,proc_gap)+1
-                end if
-                ! Compute if I am the last.
-                if ((send_group_max(ind1,ind2) > last_condition(1)) &
-                     & .AND.(send_group_min(ind1,ind2)<= last_condition(2))) then
-                   last(2,proc_gap) =  last(2,proc_gap)+1
-                end if
-                ! Update cartography // Needed even if target processus is myself as we us buffer in all the case (scalar field cannot be used directly during the remeshing)
-                if (begin_interval) then 
-                   cartography(2+ind2,proc_gap) =  cartography(2+ind2,proc_gap)+2
-                   cartography(cartography(2,proc_gap)+1,proc_gap) = ind1
-                   cartography(2,proc_gap) = cartography(2,proc_gap) + 2
-                   cartography(cartography(2,proc_gap),proc_gap) = ind1
-                   begin_interval = .false.
-                else
-                   cartography(cartography(2,proc_gap),proc_gap) = ind1
-                end if
-             else
-                begin_interval = .true.
-             end if
-          end do
-       end do
-    end do
-
-    ! ===== Send information about first and last  =====
-    tag_table = compute_tag(ind_group, tag_obtsend_NP, direction)
-    do proc_gap = send_gap_abs(1), send_gap_abs(2)
-       ! I am the first ?
-       if (first(2,proc_gap)>0) then
-          if(send_rank(proc_gap)/= D_rank(direction)) then
-             call mpi_Isend(first(1,proc_gap), 2, MPI_INTEGER, send_rank(proc_gap), tag_table(1), D_comm(direction), &
-                  & send_request(proc_gap,1), ierr)
-             send_request(proc_gap,3) = 1
-          else
-             rece_gap(1,1) = min(rece_gap(1,1), -proc_gap)
-             rece_gap(2,1) = rece_gap(2,1) + first(2,proc_gap)
-          end if
-       end if
-       ! I am the last ?
-       if (last(2,proc_gap)>0) then
-          if(send_rank(proc_gap)/= D_rank(direction)) then
-             call mpi_Isend(last(1,proc_gap), 2, MPI_INTEGER, send_rank(proc_gap), tag_table(2), D_comm(direction), &
-                  & send_request(proc_gap,2), ierr)
-             send_request(proc_gap,3) = send_request(proc_gap, 3) + 2
-          else
-             rece_gap(1,2) = max(rece_gap(1,2), -proc_gap)
-             rece_gap(2,2) = rece_gap(2,2) + last(2,proc_gap)
-          end if
-       end if
-    end do
-
-    ! ===== Receive information form the first and the last processus which need a part of my local velocity field =====
-    do while(rece_gap(2,1) < gp_size)
-       call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
-       rece_gap(1,1) = min(rece_gap(1,1), rece_buffer(1))
-       rece_gap(2,1) = rece_gap(2,1) + rece_buffer(2)
-    end do
-    do while(rece_gap(2,2) < gp_size)
-       call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
-       rece_gap(1,2) = max(rece_gap(1,2), rece_buffer(1))
-       rece_gap(2,2) = rece_gap(2,2) + rece_buffer(2)
-    end do
-
-    ! ===== Free Isend buffer =====
-    do proc_gap = send_gap_abs(1), send_gap_abs(2)
-       select case (send_request(proc_gap,3))
-       case (3)
-          call mpi_wait(send_request(proc_gap,1), statut, ierr)
-          call mpi_wait(send_request(proc_gap,2), statut, ierr)
-       case (2)
-          call mpi_wait(send_request(proc_gap,2), statut, ierr)
-       case (1)
-          call mpi_wait(send_request(proc_gap,1), statut, ierr)
-       end select
-    end do
-
-  end subroutine AC_remesh_determine_communication
-
-
-  !> Determine the set of processes wich will send me information during the remeshing 
-  !! and compute for each of these processes the range of wanted data. Use implicit 
-  !! computation rather than communication (only possible if particle are gather by 
-  !! block whith contrainst on velocity variation - as corrected lambda formula.) -
-  !! work directly on a group of particles lines.
-  !     @param[in]    send_group_min  = minimal indice of mesh involved in remeshing particles (of the particles in my local subdomains)
-  !     @param[in]    send_group_max  = maximal indice of mesh involved in remeshing particles (of the particles in my local subdomains)
-  !!    @param[in]    direction       = current direction (1 = along X, 2 = along Y, 3 = along Z)
-  !!    @param[in]    ind_group       = coordinate of the current group of lines
-  !!    @param[out]   proc_min        = gap between my coordinate and the processes of minimal coordinate which will receive information from me
-  !!    @param[out]   proc_max        = gap between my coordinate and the processes of maximal coordinate which will receive information from me
-  !!    @param[out]   rece_proc       = coordinate range of processes which will send me information during the remeshing.
-  !!    @param[in]    gp_s            = size of group of line along the current direction
-  !! @details
-  !!    Work on a group of line of size gs(1) x gs(2))
-  !!    Obtain the list of processts which are associated to sub-domain where my partticles
-  !!    will be remeshed and the list of processes wich contains particles which
-  !!    have to be remeshed in my sub-domain. This way, this procedure determine
-  !!    which processus need to communicate together in order to proceed to the
-  !!    remeshing (as in a parrallel context the real space is subdivised and each
-  !!    processus contains a part of it)
-  !!        In the same time, it computes for each processus with which I will
-  !!    communicate, the range of mesh point involved for each line of particles 
-  !!    inside the group and it stores it by using some sparse matrix technics 
-  !!    (see cartography defined in the algorithm documentation)
-  !!        This routine does not involve any computation to determine if
-  !!    a processus is the first or the last processes (considering its coordinate along 
-  !!    the current directory) to send remeshing information to a given processes.
-  !!    It directly compute it using contraints on velocity (as in corrected lambda
-  !!    scheme) When possible use it rather than AC_obtain_senders_com
-  !> Determine the set of processes wich will send me information during the remeshing 
-  !! and compute for each of these processes the range of wanted data. Use implicit 
-  !! computation rather than communication (only possible if particle are gather by 
-  !! block whith contrainst on velocity variation - as corrected lambda formula.) -
-  !! work directly on a group of particles lines.
-  !     @param[in]    send_group_min  = minimal indice of mesh involved in remeshing particles (of the particles in my local subdomains)
-  !     @param[in]    send_group_max  = maximal indice of mesh involved in remeshing particles (of the particles in my local subdomains)
-  !!    @param[in]    direction       = current direction (1 = along X, 2 = along Y, 3 = along Z)
-  !!    @param[in]    ind_group       = coordinate of the current group of lines
-  !!    @param[out]   proc_min        = gap between my coordinate and the processes of minimal coordinate which will receive information from me
-  !!    @param[out]   proc_max        = gap between my coordinate and the processes of maximal coordinate which will receive information from me
-  !!    @param[out]   rece_proc       = coordinate range of processes which will send me information during the remeshing.
-  !!    @param[in]    gp_s            = size of group of line along the current direction
-  !! @details
-  !!    Work on a group of line of size gs(1) x gs(2))
-  !!    Obtain the list of processts which are associated to sub-domain where my partticles
-  !!    will be remeshed and the list of processes wich contains particles which
-  !!    have to be remeshed in my sub-domain. This way, this procedure determine
-  !!    which processus need to communicate together in order to proceed to the
-  !!    remeshing (as in a parrallel context the real space is subdivised and each
-  !!    processus contains a part of it)
-  !!        In the same time, it computes for each processus with which I will
-  !!    communicate, the range of mesh point involved for each line of particles 
-  !!    inside the group and it stores it by using some sparse matrix technics 
-  !!    (see cartography defined in the algorithm documentation)
-  !!        This routine does not involve any computation to determine if
-  !!    a processus is the first or the last processes (considering its coordinate along 
-  !!    the current directory) to send remeshing information to a given processes.
-  !!    It directly compute it using contraints on velocity (as in corrected lambda
-  !!    scheme) When possible use it rather than AC_obtain_senders_com
-  subroutine AC_obtain_senders_group(direction, gp_s, ind_group, proc_min, proc_max, rece_proc)
-    ! XXX Work only for periodic condition. For dirichlet conditions : it is
-    ! possible to not receive either rece_proc(1), either rece_proc(2) or none of
-    ! these two => detect it (track the first and the last particles) and deal with it.
-
-    ! Input/output
-    integer, intent(in)                             :: direction
-    integer, dimension(2), intent(in)               :: ind_group
-    integer(kind=4), dimension(:,:), intent(out)    :: proc_min, proc_max
-    integer, dimension(:,:,:), intent(out)          :: rece_proc
-    integer, dimension(2), intent(in)               :: gp_s
-    ! Other local variable
-    integer(kind=4)                                 :: proc_gap         ! gap between a processus coordinate (along the current 
-    ! direction) into the mpi-topology and my coordinate
-    integer                                         :: rankP, rankN     ! processus rank for shift (P= previous, N = next)
-    integer, dimension(2)                           :: tag_table        ! mpi message tag (for communicate rece_proc(1) and rece_proc(2))
-    integer                                         :: proc_max_abs     ! maximum of proc_max array
-    integer                                         :: proc_min_abs     ! minimum of proc_min array
-    integer, dimension(:,:), allocatable            :: first, last      ! Storage processus to which I will be the first (or the last) to send
-    ! remeshed particles
-    integer, dimension(2)                           :: first_condition  ! allowed range of value of proc_min and proc_max for being the first
-    integer, dimension(2)                           :: last_condition   ! allowed range of value of proc_min and proc_max for being the last
-    integer, dimension(:,:),allocatable             :: send_request     ! mpi status of nonblocking send
-    integer                                         :: ierr             ! mpi error code
-    integer, dimension(MPI_STATUS_SIZE)             :: statut           ! mpi status
-    integer                                         :: ind1, ind2       ! indice of the current line inside the group
-    integer                                         :: min_size         ! begin indice in first and last to stock indice along first dimension of the group line
-    integer                                         :: indice           ! internal indice
-    integer, dimension(1 + gp_s(2)*(1+gp_s(1)))         :: rece_buffer      ! buffer for reception of rece_max
-
-    rece_proc = 3*N(direction)
-
-    proc_min = floor(real(send_group_min-1)/N_proc(direction))
-    proc_max = floor(real(send_group_max-1)/N_proc(direction))
-    proc_min_abs = minval(proc_min)
-    proc_max_abs = maxval(proc_max)
-
-    allocate(send_request(proc_min_abs:proc_max_abs,3))
-    send_request(:,3) = 0
-
-    ! -- Determine if I am the first or the last to send information to a given
-    ! processus and sort line by target processes for which I am the first and
-    ! for which I am the last. --
-    tag_table = compute_tag(ind_group, tag_obtsend_NP, direction)
-    min_size = 2 + gp_s(2)
-    allocate(first(2*(gp_s(1)*gp_s(2)+1),proc_min_abs:proc_max_abs))
-    first(1,:) = min_size
-    allocate(last(2*(gp_s(1)*gp_s(2)+1),proc_min_abs:proc_max_abs))
-    last(1,:) = min_size
-    do proc_gap = proc_min_abs, proc_max_abs
-       first(2,proc_gap) = -proc_gap
-       last(2,proc_gap) = -proc_gap
-       first_condition(2) = proc_gap*N_proc(direction)+1
-       first_condition(1) = 1-2*bl_bound_size + first_condition(2)
-       last_condition(2)  = (proc_gap+1)*N_proc(direction)
-       last_condition(1)  = -1+2*bl_bound_size + last_condition(2)
-       do ind2 = 1, gp_s(2)
-          first(2+ind2,proc_gap) = 0
-          last(2+ind2,proc_gap) =  0
-          do ind1 = 1, gp_s(1)
-             ! Compute if I am the first.
-             if ((send_group_min(ind1,ind2)< first_condition(1)).AND. &
-                  & (send_group_max(ind1,ind2)>= first_condition(2))) then
-                first(2+ind2,proc_gap) =  first(2+ind2,proc_gap)+1
-                first(1,proc_gap) = first(1,proc_gap) + 1
-                first(first(1,proc_gap),proc_gap) = ind1
-             end if
-             ! Compute if I am the last.
-             if ((send_group_max(ind1,ind2) > last_condition(1)) &
-                  & .AND.(send_group_min(ind1,ind2)<= last_condition(2))) then
-                last(2+ind2,proc_gap) =  last(2+ind2,proc_gap)+1
-                last(1,proc_gap) = last(1,proc_gap) + 1
-                last(last(1,proc_gap),proc_gap) = ind1
-             end if
-          end do
-       end do
-    end do
-
-
-    ! -- Send information if I am the first or the last --
-    do proc_gap = proc_min_abs, proc_max_abs
-       ! I am the first ?
-       if (first(1,proc_gap)>min_size) then
-          ! Compute the rank of the target processus
-          call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rankN, ierr)
-          if(rankN /= D_rank(direction)) then
-             call mpi_Isend(first(2,proc_gap), first(1,proc_gap)-1, MPI_INTEGER, rankN, tag_table(1), D_comm(direction), &
-                  & send_request(proc_gap,1), ierr)
-             send_request(proc_gap,3) = 1
-          else
-             indice = min_size
-             do ind2 = 1, gp_s(2)
-                do ind1 = 1, first(2+ind2,proc_gap)
-                   indice = indice+1
-                   rece_proc(1,first(indice,proc_gap),ind2) = -proc_gap
-                end do
-             end do
-          end if
-       end if
-       ! I am the last ?
-       if (last(1,proc_gap)>min_size) then
-          ! Compute the rank of the target processus
-          call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rankN, ierr)
-          if(rankN /= D_rank(direction)) then
-             call mpi_Isend(last(2,proc_gap), last(1,proc_gap)-1, MPI_INTEGER, rankN, tag_table(2), D_comm(direction), &
-                  & send_request(proc_gap,2), ierr)
-             send_request(proc_gap,3) = send_request(proc_gap, 3) + 2
-          else
-             indice = min_size
-             do ind2 = 1, gp_s(2)
-                do ind1 = 1, last(2+ind2,proc_gap)
-                   indice = indice+1
-                   rece_proc(2,last(indice,proc_gap),ind2) = -proc_gap
-                end do
-             end do
-          end if
-       end if
-    end do
-
-    ! -- Receive it --
-    ! size_max = size(rece_buffer) ! 2 + 2*gp_s(1)*gp_s(2)
-    do while(any(rece_proc(1,:,:) == 3*N(direction)))
-       call mpi_recv(rece_buffer(1), size(rece_buffer), MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
-       indice = min_size-1
-       do ind2 = 1, gp_s(2)
-          do ind1 = 1, rece_buffer(1+ind2)
-             indice = indice+1
-             rece_proc(1,rece_buffer(indice),ind2) = rece_buffer(1)
-          end do
-       end do
-    end do
-    do while(any(rece_proc(2,:,:) == 3*N(direction)))
-       call mpi_recv(rece_buffer(1), size(rece_buffer), MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
-       indice = min_size-1
-       do ind2 = 1, gp_s(2)
-          do ind1 = 1, rece_buffer(1+ind2)
-             indice = indice+1
-             rece_proc(2,rece_buffer(indice),ind2) = rece_buffer(1)
-          end do
-       end do
-    end do
-
-    ! -- Free Isend buffer --
-    do proc_gap = proc_min_abs, proc_max_abs
-       select case (send_request(proc_gap,3))
-       case (3)
-          call mpi_wait(send_request(proc_gap,1), statut, ierr)
-          call mpi_wait(send_request(proc_gap,2), statut, ierr)
-       case (2)
-          call mpi_wait(send_request(proc_gap,2), statut, ierr)
-       case (1)
-          call mpi_wait(send_request(proc_gap,1), statut, ierr)
-       end select
-    end do
-
-  end subroutine AC_obtain_senders_group
-
-
-  !> Determine the set of processes wich will send me information during the
-  !!  scalar remeshing by explicit (and exensive) way : communications !
-  !!    @param[in]    direction   = current direction (1 = along X, 2 = along Y, 3 = along Z)
-  !!    @param[in]    ind_group   = coordinate of the current group of lines
-  !!    @param[out]   proc_min    = gap between my coordinate and the processes of minimal coordinate which will receive information from me
-  !!    @param[out]   proc_max    = gap between my coordinate and the processes of maximal coordinate which will receive information from me
-  !!    @param[out]   rece_proc   = coordinate range of processes which will send me information during the remeshing.
-  !!    @param[in]    gp_s        = size of group of line along the current direction
-  !!    @param[in]    com         = integer used to distinguish this function from AC_obtain_senders_group.
-  !! @details
-  !!    Obtain the list of processus which contains some particles which belong to
-  !!    my subdomains after their advection (and thus which will be remeshing into
-  !!    my subdomain). This result is return as an interval [send_min; send_max].
-  !!    All the processus whose coordinate (into the current direction) belong to 
-  !!    this segment are involved into scalar remeshing into the current
-  !!    subdomains. Use this method when the sender are not predictable without
-  !!    communication, as in M'6 schemes for instance. More precisly, it
-  !!    correspond do scheme without bloc of particle involving velocity variation
-  !!    contrainsts to avoid that the distance between to particle grows (or dimishes)
-  !!    too much.
-  subroutine AC_obtain_senders_com(direction, gp_s, ind_group, proc_min, proc_max, rece_proc, com)
-    ! XXX Work only for periodic condition. See AC_obtain_senders. Adapt it for
-    ! other condition must be more easy.
-
-    ! Input/output
-    integer, intent(in)                             :: direction
-    integer, dimension(2), intent(in)               :: ind_group
-    integer(kind=4), dimension(:,:), intent(out)    :: proc_min, proc_max
-    integer, dimension(:,:,:), intent(out)          :: rece_proc
-    integer, dimension(2), intent(in)               :: gp_s
-    integer, intent(in)                             :: com
-    ! Other local variable
-    integer(kind=4)                                 :: proc_gap         ! gap between a processus coordinate (along the current 
-    ! direction) into the mpi-topology and my coordinate
-    integer                                         :: rankP, rankN     ! processus rank for shift (P= previous, N = next)
-    integer, dimension(2)                           :: tag_table        ! mpi message tag (for communicate rece_proc(1) and rece_proc(2))
-    integer, dimension(gp_s(1), gp_s(2))            :: proc_max_prev    ! maximum gap between previous processus and the receivers of its remeshing buffer
-    integer, dimension(gp_s(1), gp_s(2))            :: proc_min_next    ! minimum gap between next processus and the receivers of its remeshing buffer
-    integer                                         :: proc_max_abs     ! maximum of proc_max array
-    integer                                         :: proc_min_abs     ! minimum of proc_min array
-    integer, dimension(:,:), allocatable            :: first, last      ! Storage processus to which I will be the first (or the last) to send
-    ! remeshed particles
-    integer, dimension(:,:),allocatable             :: send_request     ! mpi status of nonblocking send
-    integer, dimension(2)                           :: send_request_gh  ! mpi status of nonblocking send (when exchanging "ghost")
-    integer                                         :: ierr             ! mpi error code
-    integer, dimension(MPI_STATUS_SIZE)             :: statut           ! mpi status
-    integer                                         :: ind1, ind2       ! indice of the current line inside the group
-    integer                                         :: nb
-    integer, dimension(2 + 2*gp_s(1)*gp_s(2))       :: rece_buffer      ! buffer for reception of rece_max
-
-
-    rece_proc = 3*N(direction)
-
-    proc_min = floor(real(send_group_min-1)/N_proc(direction))
-    proc_max = floor(real(send_group_max-1)/N_proc(direction))
-    proc_min_abs = minval(proc_min)
-    proc_max_abs = maxval(proc_max)
-
-    allocate(send_request(proc_min_abs:proc_max_abs,3))
-    send_request(:,3) = 0
-
-    ! -- Exchange send_block_min and send_block_max to determine if I am the first
-    ! or the last to send information to a given target processus. --
-    ! Compute message tag - we re-use tag_part_tag_NP id as using this procedure
-    ! suppose not using "AC_type_and_block"
-    tag_table = compute_tag(ind_group, tag_part_tag_NP, direction)
-    ! Send "ghost"
-    call mpi_Isend(proc_min(1,1), gp_s(1)*gp_s(2), MPI_INTEGER, neighbors(direction,1), tag_table(1), &
-         & D_comm(direction), send_request_gh(1), ierr)
-    call mpi_Isend(proc_max(1,1), gp_s(1)*gp_s(2), MPI_INTEGER, neighbors(direction,2), tag_table(2), &
-         & D_comm(direction), send_request_gh(2), ierr)
-    ! Receive it
-    call mpi_recv(proc_min_next(1,1), gp_s(1)*gp_s(2), MPI_INTEGER, neighbors(direction,2), tag_table(1), & 
-         & D_comm(direction), statut, ierr)
-    call mpi_recv(proc_max_prev(1,1), gp_s(1)*gp_s(2), MPI_INTEGER, neighbors(direction,1), tag_table(2), &
-         & D_comm(direction), statut, ierr)
-
-    ! -- Determine if I am the first or the last to send information to a given
-    ! processus and sort line by target processes for which I am the first and
-    ! for which I am the last. --
-    tag_table = compute_tag(ind_group, tag_obtsend_NP, direction)
-    allocate(first(2*(gp_s(1)*gp_s(2)+1),proc_min_abs:proc_max_abs))
-    first(2,:) = 2
-    allocate(last(2*(gp_s(1)*gp_s(2)+1),proc_min_abs:proc_max_abs))
-    last(2,:) = 2
-    do proc_gap = proc_min_abs, proc_max_abs
-       first(1,proc_gap) = -proc_gap
-       last(1,proc_gap) = -proc_gap
-    end do
-    do ind2 = 1, gp_s(2)
-       do ind1 = 1, gp_s(1)
-          ! Compute if I am the first, ie if:
-          ! a - proc_min <= proc_gap <= proc_max,
-          ! b - proc_gap > proc_max_prev -1.
-          do proc_gap = max(proc_min(ind1,ind2), proc_max_prev(ind1,ind2)), proc_max(ind1,ind2)
-             first(first(2,proc_gap)+1,proc_gap) = ind1
-             first(first(2,proc_gap)+2,proc_gap) = ind2
-             first(2 ,proc_gap) = first(2 ,proc_gap) + 2
-          end do
-          ! Compute if I am the last, ie if:
-          ! a - proc_min <= proc_gap <= proc_max,
-          ! b - proc_gap < proc_min_next+1.
-          do proc_gap = proc_min(ind1,ind2), min(proc_min_next(ind1,ind2), proc_max(ind1,ind2))
-             last(last(2,proc_gap)+1,proc_gap) = ind1
-             last(last(2,proc_gap)+2,proc_gap) = ind2
-             last(2 ,proc_gap) = last(2 ,proc_gap) + 2
-          end do
-       end do
-    end do
-
-
-    ! -- Send information if I am the first or the last --
-    do proc_gap = proc_min_abs, proc_max_abs
-       ! I am the first ?
-       if (first(2,proc_gap)>2) then
-          ! Compute the rank of the target processus
-          call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rankN, ierr)
-          if(rankN /= D_rank(direction)) then
-             call mpi_Isend(first(1,proc_gap), first(2,proc_gap), MPI_INTEGER, rankN, tag_table(1), D_comm(direction), &
-                  & send_request(proc_gap,1), ierr)
-             send_request(proc_gap,3) = 1
-          else
-             do nb = 3, first(2,proc_gap), 2
-                rece_proc(1,first(nb,proc_gap),first(nb+1,proc_gap)) = -proc_gap
-             end do
-          end if
-       end if
-       ! I am the last ?
-       if (last(2,proc_gap)>2) then
-          ! Compute the rank of the target processus
-          call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rankN, ierr)
-          if(rankN /= D_rank(direction)) then
-             call mpi_Isend(last(1,proc_gap), last(2,proc_gap), MPI_INTEGER, rankN, tag_table(2), D_comm(direction), &
-                  & send_request(proc_gap,2), ierr)
-             send_request(proc_gap,3) = send_request(proc_gap, 3) + 2
-          else
-             do nb = 3, last(2,proc_gap), 2
-                rece_proc(2,last(nb,proc_gap),last(nb+1,proc_gap)) = -proc_gap
-             end do
-          end if
-       end if
-    end do
-
-    ! -- Receive it --
-    do while(any(rece_proc(1,:,:) == 3*N(direction)))
-       rece_buffer(2) = 2
-       call mpi_recv(rece_buffer(1), size(rece_buffer), MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
-       do nb = 3, rece_buffer(2), 2
-          rece_proc(1,rece_buffer(nb),rece_buffer(nb+1)) = rece_buffer(1)
-       end do
-    end do
-    do while(any(rece_proc(2,:,:) == 3*N(direction)))
-       rece_buffer(2) = 2
-       call mpi_recv(rece_buffer(1),size(rece_buffer), MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
-       do nb = 3, rece_buffer(2), 2
-          rece_proc(2,rece_buffer(nb),rece_buffer(nb+1)) = rece_buffer(1)
-       end do
-    end do
-
-    ! -- Free Isend buffer --
-    call mpi_wait(send_request_gh(1), statut, ierr)
-    call mpi_wait(send_request_gh(2), statut, ierr)
-    do proc_gap = proc_min_abs, proc_max_abs
-       select case (send_request(proc_gap,3))
-       case (3)
-          call mpi_wait(send_request(proc_gap,1), statut, ierr)
-          call mpi_wait(send_request(proc_gap,2), statut, ierr)
-       case (2)
-          call mpi_wait(send_request(proc_gap,2), statut, ierr)
-       case (1)
-          call mpi_wait(send_request(proc_gap,1), statut, ierr)
-       end select
-    end do
-
-  end subroutine AC_obtain_senders_com
-
-  ! ===================================================================
-  ! ====================     Remesh particles      ====================
-  ! ===================================================================
-
-  !> Remesh particle line with corrected lambda 2 formula - remeshing is done into
-  !! an real array
-  !!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
-  !!    @param[in]        p_pos_adim  = adimensionned  particles position
-  !!    @param[in]        scal1D      = scalar field to advect
-  !!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
-  !!    @param[in]        bl_tag      = contains information about bloc (is it tagged ?)
-  !!    @param[in]        ind_min     = minimal indice of the send buffer
-  !!    @param[in]        ind_max     = maximal indice of the send buffer
-  !!    @param[in, out]   send_buffer = buffer use to remesh the scalar before to send it to the right subdomain
-  !! @details
-  !!     Use corrected lambda 2 remeshing formula. 
-  !! This remeshing formula depends on the particle type :
-  !!     1 - Is the particle tagged ?
-  !!     2 - Does it belong to a centered or a left block ?
-  !! Observe that tagged particles go by group of two : if the particles of a
-  !! block end are tagged, the one first one of the following block are
-  !! tagged too.
-  !! The following algorithm is write for block of minimal size.
-  !! @author = Jean-Baptiste Lagaert, LEGI/Ljk
-  subroutine AC_remesh_lambda2corrected_array(direction,p_pos_adim,scal1D,bl_type,bl_tag,ind_min,ind_max,send_buffer)
-
-    ! Input/Output
-    integer, intent(in)                                 :: direction
-    real(WP), dimension(:), intent(in)                  :: p_pos_adim
-    real(WP), dimension(:), intent(in)  :: scal1D
-    logical, dimension(:), intent(in)                   :: bl_type
-    logical, dimension(:), intent(in)                   :: bl_tag
-    integer, intent(in)                                 :: ind_min, ind_max
-    real(WP), dimension(ind_min:ind_max), intent(inout) :: send_buffer
-    ! Other local variables
-    integer     :: bl_ind                               ! indice of the current "block end".
-    integer     :: p_ind                                ! indice of the current particle
-
-    send_j_min = ind_min
-    send_j_max = ind_max
-
-    do p_ind = 1, N_proc(direction), bl_size
-       bl_ind = p_ind/bl_size + 1
-       if (bl_tag(bl_ind)) then
-          ! Tag case
-          ! XXX Debug : to activate only in purpose debug
-          !if (bl_type(ind).neqv. (.not. bl_type(ind+1))) then
-          !    write(*,'(a,x,3(L1,x),a,3(i0,a))'), 'error on remeshing particles: (tag,type(i), type(i+1)) =', &
-          !    & bl_tag(ind), bl_type(ind), bl_type(ind+1), ' and type must be different. Mesh point = (',i, ', ', j,', ',k,')'
-          !    write(*,'(a,x,i0)'),  'paramètres du blocs : ind =', bl_ind
-          !    stop
-          !end if
-          ! XXX Debug - end
-          if (bl_type(bl_ind)) then
-             ! tagged, the first particle belong to a centered block and the last to left block.
-             call AC_remesh_tag_CL(p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), send_buffer)
-          else
-             ! tagged, the first particle belong to a left block and the last to centered block.
-             call AC_remesh_tag_LC(p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), send_buffer)
-          end if
-       else
-          if (bl_type(bl_ind)) then
-             ! First particle is remeshed with center formula
-             call AC_remesh_center(p_pos_adim(p_ind),scal1D(p_ind), send_buffer)
-          else
-             ! First particle is remeshed with left formula
-             call AC_remesh_left(p_pos_adim(p_ind),scal1D(p_ind), send_buffer)
-          end if
-          if (bl_type(bl_ind+1)) then
-             ! Second particle is remeshed with center formula
-             call AC_remesh_center(p_pos_adim(p_ind+1),scal1D(p_ind+1), send_buffer)
-          else
-             ! Second particle is remeshed with left formula
-             call AC_remesh_left(p_pos_adim(p_ind+1),scal1D(p_ind+1), send_buffer)
-          end if
-       end if
-    end do
-
-  end subroutine AC_remesh_lambda2corrected_array
-
-
-  !> Remesh particle line with corrected lambda 2 formula - remeshing is done into
-  !! an array of pointer to real
-  !!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
-  !!    @param[in]        p_pos_adim  = adimensionned  particles position
-  !!    @param[in]        scal1D      = scalar field to advect
-  !!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
-  !!    @param[in]        bl_tag      = contains information about bloc (is it tagged ?)
-  !!    @param[in]        ind_min     = minimal indice of the send buffer
-  !!    @param[in]        ind_max     = maximal indice of the send buffer
-  !!    @param[in, out]   send_pter   = array of pointers to the buffer use to remesh the scalar before to send it to the right subdomain
-  !! @details
-  !!     Use corrected lambda 2 remeshing formula. 
-  !! This remeshing formula depends on the particle type :
-  !!     1 - Is the particle tagged ?
-  !!     2 - Does it belong to a centered or a left block ?
-  !! Observe that tagged particles go by group of two : if the particles of a
-  !! block end are tagged, the one first one of the following block are
-  !! tagged too.
-  !! The following algorithm is write for block of minimal size.
-  !! @author = Jean-Baptiste Lagaert, LEGI/Ljk
-  subroutine AC_remesh_lambda2corrected_pter(direction,p_pos_adim,scal1D,bl_type,bl_tag,ind_min,ind_max,send_buffer)
-
-    ! Input/Output
-    integer, intent(in)                                         :: direction
-    real(WP), dimension(:), intent(in)                          :: p_pos_adim
-    real(WP), dimension(:), intent(in)          :: scal1D
-    logical, dimension(:), intent(in)                           :: bl_type
-    logical, dimension(:), intent(in)                           :: bl_tag
-    integer, intent(in)                                         :: ind_min, ind_max
-    type(real_pter), dimension(ind_min:ind_max), intent(inout)  :: send_buffer
-    ! Other local variables
-    integer     :: bl_ind                               ! indice of the current "block end".
-    integer     :: p_ind                                ! indice of the current particle
-
-    send_j_min = ind_min
-    send_j_max = ind_max
-
-    do p_ind = 1, N_proc(direction), bl_size
-       bl_ind = p_ind/bl_size + 1
-       if (bl_tag(bl_ind)) then
-          ! Tag case
-          ! XXX Debug : to activate only in purpose debug
-          !if (bl_type(ind).neqv. (.not. bl_type(ind+1))) then
-          !    write(*,'(a,x,3(L1,x),a,3(i0,a))'), 'error on remeshing particles: (tag,type(i), type(i+1)) =', &
-          !    & bl_tag(ind), bl_type(ind), bl_type(ind+1), ' and type must be different. Mesh point = (',i, ', ', j,', ',k,')'
-          !    write(*,'(a,x,i0)'),  'paramètres du blocs : ind =', bl_ind
-          !    stop
-          !end if
-          ! XXX Debug - end
-          if (bl_type(bl_ind)) then
-             ! tagged, the first particle belong to a centered block and the last to left block.
-             !!call AC_remesh_tag_CL(p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), send_buffer)
-          else
-             ! tagged, the first particle belong to a left block and the last to centered block.
-             !!call AC_remesh_tag_LC(p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), send_buffer)
-          end if
-       else
-          if (bl_type(bl_ind)) then
-             ! First particle is remeshed with center formula
-             !!call AC_remesh_center(p_pos_adim(p_ind),scal1D(p_ind), send_buffer)
-          else
-             ! First particle is remeshed with left formula
-             !!call AC_remesh_left(p_pos_adim(p_ind),scal1D(p_ind), send_buffer)
-          end if
-          if (bl_type(bl_ind+1)) then
-             ! Second particle is remeshed with center formula
-             !!call AC_remesh_center(p_pos_adim(p_ind+1),scal1D(p_ind+1), send_buffer)
-          else
-             ! Second particle is remeshed with left formula
-             !!call AC_remesh_left(p_pos_adim(p_ind+1),scal1D(p_ind+1), send_buffer)
-          end if
-       end if
-    end do
-
-  end subroutine AC_remesh_lambda2corrected_pter
-
-
-  !> Remesh particle line with corrected lambda 2 formula
-  !!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
-  !!    @param[in]        p_pos_adim  = adimensionned  particles position
-  !!    @param[in]        scal1D      = scalar field to advect
-  !!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
-  !!    @param[in]        bl_tag      = contains information about bloc (is it tagged ?)
-  !!    @param[in]        ind_min     = minimal indice of the send buffer
-  !!    @param[in]        ind_max     = maximal indice of the send buffer
-  !!    @param[in, out]   send_buffer = buffer use to remesh the scalar before to send it to the right subdomain
-  !! @details
-  !!     Use corrected lambda 2 remeshing formula. 
-  !! This remeshing formula depends on the particle type :
-  !!     1 - Is the particle tagged ?
-  !!     2 - Does it belong to a centered or a left block ?
-  !! Observe that tagged particles go by group of two : if the particles of a
-  !! block end are tagged, the one first one of the following block are
-  !! tagged too.
-  !! The following algorithm is write for block of minimal size.
-  !! @author = Jean-Baptiste Lagaert, LEGI/Ljk
-  subroutine AC_remesh_lambda4corrected(direction, p_pos_adim, scal1D, bl_type, bl_tag, ind_min, ind_max, send_buffer)
-
-    ! Input/Output
-    integer, intent(in)                                 :: direction
-    real(WP), dimension(:), intent(in)                  :: p_pos_adim
-    real(WP), dimension(:), intent(in)  :: scal1D
-    logical, dimension(:), intent(in)                   :: bl_type
-    logical, dimension(:), intent(in)                   :: bl_tag
-    integer, intent(in)                                 :: ind_min, ind_max
-    real(WP), dimension(ind_min:ind_max), intent(inout) :: send_buffer
-    ! Other local variables
-    integer     :: bl_ind                               ! indice of the current "block end".
-    integer     :: p_ind                                ! indice of the current particle
-
-    send_j_min = ind_min
-    send_j_max = ind_max
-
-    do p_ind = 1, N_proc(direction), bl_size
-       bl_ind = p_ind/bl_size + 1
-       if (bl_tag(bl_ind)) then 
-          ! Tagged case
-          if (bl_type(bl_ind)) then
-             ! tagged, the first particle belong to a centered block and the last to left block.
-             call AC_remesh_O4_tag_CL(p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), &
-                  & p_pos_adim(p_ind+2), scal1D(p_ind+2), p_pos_adim(p_ind+3), scal1D(p_ind+3), send_buffer)
-          else
-             ! tagged, the first particle belong to a left block and the last to centered block.
-             call AC_remesh_O4_tag_LC(p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), &
-                  & p_pos_adim(p_ind+2), scal1D(p_ind+2), p_pos_adim(p_ind+3), scal1D(p_ind+3), send_buffer)
-          end if
-       else
-          ! No tag
-          if (bl_type(bl_ind)) then 
-             call AC_remesh_O4_center(p_pos_adim(p_ind),scal1D(p_ind), send_buffer)
-             call AC_remesh_O4_center(p_pos_adim(p_ind+1),scal1D(p_ind+1), send_buffer)
-          else
-             call AC_remesh_O4_left(p_pos_adim(p_ind),scal1D(p_ind), send_buffer)
-             call AC_remesh_O4_left(p_pos_adim(p_ind+1),scal1D(p_ind+1), send_buffer)
-          end if
-          if (bl_type(bl_ind+1)) then 
-             call AC_remesh_O4_center(p_pos_adim(p_ind+2),scal1D(p_ind+2), send_buffer)
-             call AC_remesh_O4_center(p_pos_adim(p_ind+3),scal1D(p_ind+3), send_buffer)
-          else
-             call AC_remesh_O4_left(p_pos_adim(p_ind+2),scal1D(p_ind+2), send_buffer)
-             call AC_remesh_O4_left(p_pos_adim(p_ind+3),scal1D(p_ind+3), send_buffer)
-          end if
-       end if
-    end do
-
-  end subroutine AC_remesh_lambda4corrected
-
-  !> Left remeshing formula of order 2
-  !!      @param[in]       pos_adim= adimensionned particle position
-  !!      @param[in]       sca     = scalar advected by the particle
-  !!      @param[in,out]   buffer  = temporaly remeshed scalar field
-  subroutine AC_remesh_left(pos_adim, sca, buffer)
-
-    !Input/Ouput
-    real(WP), intent(in)                                        :: pos_adim, sca
-    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
-    ! Ohter local variables
-    integer     :: j0                       ! indice of the the nearest mesh points
-    real(WP)    :: bM, b0, bP               ! interpolation weight for the particles
-    real(WP)    :: y0                       ! adimensionned distance to mesh points
-    ! Mesh point used in remeshing formula
-    j0 = floor(pos_adim)
-    !j0 = floor(pos/d_sc(2))
-
-    ! Distance to mesh points
-    y0 = (pos_adim - float(j0))
-
-    ! Interpolation weights
-    bM=0.5*y0*(y0-1.)
-    b0=1.-y0**2
-    !bP=0.5*y0*(y0+1.)
-    bP=1. - (b0+bM)
-
-    ! remeshing
-    buffer(j0-1) = buffer(j0-1)   + bM*sca
-    buffer(j0)   = buffer(j0)     + b0*sca
-    buffer(j0+1) = buffer(j0+1)   + bP*sca
-
-  end subroutine AC_remesh_left
-
-  !> Centered remeshing formula of order 2
-  !!      @param[in]       pos_adim= adimensionned particle position
-  !!      @param[in]       sca     = scalar advected by the particle
-  !!      @param[in,out]   buffer  = temporaly remeshed scalar field
-  subroutine AC_remesh_center(pos_adim, sca, buffer)
-
-    ! Input/output
-    real(WP), intent(in)                                        :: pos_adim, sca
-    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
-    ! Other local variables
-    integer     :: j0                       ! indice of the the nearest mesh points
-    real(WP)    :: bM, b0, bP               ! interpolation weight for the particles
-    real(WP)    :: y0             ! adimensionned distance to mesh points
-
-    j0 = nint(pos_adim)
-    !j0 = nint(pos/d_sc(2))
-
-    ! Distance to mesh points
-    y0 = (pos_adim - float(j0))
-    !y0 = (pos - float(j0)*d_sc(2))/d_sc(2)
-
-    ! Interpolation weights
-    bM=0.5*y0*(y0-1.)
-    b0=1.-y0**2
-    !bP=0.5*y0*(y0+1.)
-    bP=1. -b0 - bM
-
-    ! remeshing
-    buffer(j0-1) = buffer(j0-1)   + bM*sca
-    buffer(j0)   = buffer(j0)     + b0*sca
-    buffer(j0+1) = buffer(j0+1)   + bP*sca
-
-  end subroutine AC_remesh_center
-
-
-  !> Corrected remeshing formula for transition from Centered block to a Left block with a different indice (tagged particles)
-  !!    @param[in]       pos_adim= adimensionned particle position
-  !!    @param[in]       sca     = scalar advected by this particle
-  !!    @param[in]       posP_ad = adimensionned position of the second particle
-  !!    @param[in]       scaP    = scalar advected by this particle
-  !!    @param[in,out]   buffer  = temporaly remeshed scalar field
-  !! @details 
-  !!    Remeshing formula devoted to tagged particles.
-  !!    The particle group send into argument is composed of a block end and of the 
-  !!    begining of the next block. The first particles belong to a centered block
-  !!    and the last to a left one. The block have difference indice (tagged
-  !!    particles) and we have to use corrected formula.
-  subroutine AC_remesh_tag_CL(pos_adim, sca, posP_ad, scaP, buffer)
-
-    ! Input/Output
-    real(WP), intent(in)                                        :: pos_adim, sca, posP_ad, scaP
-    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
-    ! Other local variables
-    integer     :: jM, j0, jP               ! indice of the the nearest mesh points
-    ! (they depend on the block type)
-    integer     :: j0_bis                   ! indice of the the nearest mesh point for the indP=ind+1 particle
-    real(WP)    :: aM, a0, bP, b0           ! interpolation weight for the particles
-    real(WP)    :: y0, y0_bis               ! adimensionned distance to mesh points
-
-    j0 = nint(pos_adim)
-    !j0 = nint(pos/d_sc(2))
-    j0_bis = floor(posP_ad)
-    !j0_bis = floor(posP/d_sc(2))
-    jM=j0-1
-    jP=j0+1
-
-    y0 = (pos_adim - float(j0))
-    !y0 = (pos - float(j0)*d_sc(2))/d_sc(2)
-    y0_bis = (posP_ad - float(j0_bis))
-    !y0_bis = (posP - float(j0_bis)*d_sc(2))/d_sc(2)
-
-    aM=0.5*y0*(y0-1)
-    a0=1.-aM
-    bP=0.5*y0_bis*(y0_bis+1.)
-    b0=1.-bP
-
-    ! Remeshing
-    buffer(jM)=buffer(jM)+aM*sca
-    buffer(j0)=buffer(j0)+a0*sca+b0*scaP
-    buffer(jP)=buffer(jP)+bP*scaP
-
-  end subroutine AC_remesh_tag_CL
-
-
-  !> Corrected remeshing formula for transition from Left block to a Centered  block with a different indice (tagged particles)
-  !!    @param[in]       pos_adim= adimensionned particle position
-  !!    @param[in]       sca     = scalar advected by this particle
-  !!    @param[in]       posP_ad = adimensionned position of the second particle
-  !!    @param[in]       scaP    = scalar advected by this particle
-  !!    @param[in,out]   buffer  = temporaly remeshed scalar field
-  !! @details 
-  !!    Remeshing formula devoted to tagged particles.
-  !!    The particle group send into argument is composed of a block end and of the 
-  !!    begining of the next block. The first particles belong to a left block
-  !!    and the last to a centered one. The block have difference indice (tagged
-  !!    particles) and we have to use corrected formula.
-  subroutine AC_remesh_tag_LC(pos_adim, sca, posP_ad, scaP, buffer)
-
-    ! Input/Output
-    real(WP), intent(in)                                        :: pos_adim, sca, posP_ad, scaP
-    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
-    ! Other local variables
-    integer     :: jM, j0, jP, jP2, jP3             ! indice of the the nearest mesh points
-    ! (they depend on the block type)
-    integer     :: j0_bis                           ! indice of the the nearest mesh point for the indP=ind+1 particle
-    real(WP)    :: aM, a0, aP,aP2, b0, bP, bP2, bP3 ! interpolation weight for the particles
-    real(WP)    :: y0,yM_bis,y0_bis,yP_bis   ! adimensionned distance to mesh points
-    
-
-    ! Indice of mesh point used in order to remesh
-    j0 = floor(pos_adim)
-    !j0 = floor(pos/d_sc(2))
-    j0_bis = nint(posP_ad)
-    !j0_bis = nint(posP/d_sc(2))
-    jM=j0-1
-    jP=j0+1
-    jP2=j0+2
-    jP3=j0+3
-
-    ! Distance to mesh point
-    y0 = (pos_adim - float(j0))
-    !y0 = (pos - float(j0)*d_sc(2))/d_sc(2)
-    y0_bis = (posP_ad - float(j0_bis))
-    !y0_bis = (posP - float(j0_bis)*d_sc(2))/d_sc(2)
-    yP_bis=y0_bis+1
-    yM_bis=y0_bis-1
-
-    ! Interpolation weight
-    a0=1-y0**2
-    aP=y0
-    !aM=y0*yM/2.
-    aM = 0.5-(a0+aP)/2.
-    aP2=aM
-    bP=-y0_bis
-    bP2=1-y0_bis**2
-    !b0=y0_bis*yP_bis/2.
-    b0 = 0.5-(bP+bP2)/2.
-    bP3=b0
-
-    ! Remeshing
-    buffer(jM)= buffer(jM)+aM*sca
-    buffer(j0)= buffer(j0)+a0*sca+b0*scaP
-    buffer(jP)= buffer(jP)+aP*sca+bP*scaP
-    buffer(jP2)=buffer(jP2)+aP2*sca+bP2*scaP
-    buffer(jP3)=buffer(jP3)+bP3*scaP
-
-  end subroutine AC_remesh_tag_LC
-
-
-  !> Left remeshing formula of order 4
-  !!      @param[in]       pos_adim= adimensionned particle position
-  !!      @param[in]       sca     = scalar advected by the particle
-  !!      @param[in,out]   buffer  = temporaly remeshed scalar field
-  subroutine AC_remesh_O4_left(pos_adim, sca, buffer)
-
-    !Input/Ouput
-    real(WP), intent(in)                                        :: pos_adim, sca
-    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
-    ! Ohter local variables
-    integer     :: j0                       ! indice of the the nearest mesh points
-    real(WP)    :: bM2, bM, b0, bP, bP2     ! interpolation weight for the particles
-    real(WP)    :: y0               ! adimensionned distance to mesh points
-    ! Mesh point used in remeshing formula
-    j0 = floor(pos_adim)
-    !j0 = floor(pos/d_sc(2))
-
-    ! Distance to mesh points
-    y0 = (pos_adim - float(j0))
-
-    ! Interpolation weights
-    !bM2=(y0-2.)*(y0-1.)*y0*(y0+1.)/24.0
-    bM2=y0*(2.+y0*(-1.+y0*(-2.+y0)))/24.0
-    !bM =(2.-y0)*(y0-1.)*y0*(y0+2.)/6.0
-    bM =y0*(-4.+y0*(4.+y0*(1.-y0)))/6.0
-    !bP =(2.-y0)*y0*(y0+1.)*(y0+2.)/6.0
-    bP =y0*(4+y0*(4-y0*(1.+y0)))/6.0
-    !bP2=(y0-1.)*y0*(y0+1.)*(y0+2.)/24.0
-    bP2=y0*(-2.+y0*(-1.+y0*(2.+y0)))/24.0
-    !b0 =(y0-2.)*(y0-1.)*(y0+1.)*(y0+2.)/4.0
-    b0 = 1. -(bM2+bM+bP+bP2)
-
-    ! remeshing
-    buffer(j0-2) = buffer(j0-2)   + bM2*sca
-    buffer(j0-1) = buffer(j0-1)   + bM*sca
-    buffer(j0)   = buffer(j0)     + b0*sca
-    buffer(j0+1) = buffer(j0+1)   + bP*sca
-    buffer(j0+2) = buffer(j0+2)   + bP2*sca
-
-  end subroutine AC_remesh_O4_left
-
-  !> Centered remeshing formula of order 4
-  !!      @param[in]       pos_adim= adimensionned particle position
-  !!      @param[in]       sca     = scalar advected by the particle
-  !!      @param[in,out]   buffer  = temporaly remeshed scalar field
-  subroutine AC_remesh_O4_center(pos_adim, sca, buffer)
-
-    ! Input/output
-    real(WP), intent(in)                                        :: pos_adim, sca
-    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
-    ! Other local variables
-    integer     :: j0                       ! indice of the the nearest mesh points
-    real(WP)    :: bM2, bM, b0, bP, bP2     ! interpolation weight for the particles
-    real(WP)    :: y0             ! adimensionned distance to mesh points
-    ! Mesh point used in remeshing formula
-    j0 = nint(pos_adim)
-
-    ! Distance to mesh points
-    y0 = (pos_adim - float(j0))
-
-    ! Interpolation weights
-    !bM2=(y0-2.)*(y0-1.)*y0*(y0+1.)/24.0
-    bM2=y0*(2.+y0*(-1.+y0*(-2.+y0)))/24.0
-    !bM =(2.-y0)*(y0-1.)*y0*(y0+2.)/6.0
-    bM =y0*(-4.+y0*(4.+y0*(1.-y0)))/6.0
-    !bP =(2.-y0)*y0*(y0+1.)*(y0+2.)/6.0
-    bP =y0*(4+y0*(4-y0*(1.+y0)))/6.0
-    !bP2=(y0-1.)*y0*(y0+1.)*(y0+2.)/24.0
-    bP2=y0*(-2.+y0*(-1.+y0*(2.+y0)))/24.0
-    !b0 =(y0-2.)*(y0-1.)*(y0+1.)*(y0+2.)/4.0
-    b0 = 1. -(bM2+bM+bP+bP2)
-
-    ! remeshing
-    buffer(j0-2) = buffer(j0-2)   + bM2*sca
-    buffer(j0-1) = buffer(j0-1)   + bM*sca
-    buffer(j0)   = buffer(j0)     + b0*sca
-    buffer(j0+1) = buffer(j0+1)   + bP*sca
-    buffer(j0+2) = buffer(j0+2)   + bP2*sca
-
-  end subroutine AC_remesh_O4_center
-
-
-  !> Order 4 corrected remeshing formula for transition from Centered block to a Left block with a different indice (tagged particles)
-  !!    @param[in]       posM_ad = adimensionned position of the first particle
-  !!    @param[in]       scaM    = scalar advected by the first particle
-  !!    @param[in]       pos_adim= adimensionned particle position
-  !!    @param[in]       sca     = scalar advected by this particle
-  !!    @param[in]       posP_ad = adimensionned position of the second particle
-  !!    @param[in]       scaP    = scalar advected by this particle
-  !!    @param[in]       posP2_ad= adimensionned position of the fourth (and last) particle
-  !!    @param[in]       scaP2   = scalar advected by this particle
-  !!    @param[in,out]   buffer  = temporaly remeshed scalar field
-  !! @details 
-  !!    Remeshing formula devoted to tagged particles.
-  !!    The particle group send into argument is composed of a block end and of the 
-  !!    begining of the next block. The first particles belong to a centered block
-  !!    and the last to a left one. The block have difference indice (tagged
-  !!    particles) and we have to use corrected formula.
-  subroutine AC_remesh_O4_tag_CL(posM_ad, scaM, pos_adim, sca, posP_ad, scaP, posP2_ad, scaP2, buffer)
-
-    ! Input/Output
-    real(WP), intent(in)                                        :: pos_adim, sca, posP_ad, scaP
-    real(WP), intent(in)                                        :: posM_ad, scaM, posP2_ad, scaP2
-    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
-    ! Other local variables
-    integer     :: jM, j0, jP, jP2          ! indice of the the nearest mesh points
-    ! (they depend on the block type)
-    real(WP)    :: aM3, aM2, aM, a0         ! interpolation weight for the particles
-    real(WP)    :: bM2, bM, b0, bP          ! interpolation weight for the particles
-    real(WP)    :: cM, c0, cP, cP2          ! interpolation weight for the particles
-    real(WP)    :: e0, eP, eP2, eP3         ! interpolation weight for the particles
-    real(WP)    :: yM, y0, yP, yP2          ! adimensionned distance to mesh points for each particles
-
-    ! Indice of mesh point used in order to remesh
-    jM = nint(posM_ad)
-    j0 = nint(pos_adim)
-    jP = floor(posP_ad)
-    jP2= floor(posP2_ad)
-
-    ! Distance to mesh point
-    yM = (posM_ad  - float(jM))
-    y0 = (pos_adim - float(j0))
-    yP = (posP_ad  - float(jP))
-    yP2= (posP2_ad - float(jP2))
-
-    ! Interpolation weights
-    !aM3=(yM-2.)*(yM-1.)*yM*(yM+1.)/24.0
-    aM3=yM*(2.+yM*(-1.+yM*(-2.+yM)))/24.0
-    !aM2=(2.-yM)*(yM-1.)*yM*(yM+2.)/6.0
-    aM2=yM*(-4.+yM*(4.+yM*(1.-yM)))/6.0
-    !aM =(yM-2.)*(yM-1.)*(yM+1.)*(yM+2.)/4.0
-    aM =(4.+(yM**2)*(-5.+yM**2))/4.0
-    !a0 =((2.-yM)*yM*(yM+1.)*(yM+2.)/6.0) + ((yM-1.)*yM*(yM+1.)*(yM+2.)/24.0)
-    a0 = 1. - (aM3+aM2+aM)
-
-    !bM2=(y0-2.)*(y0-1.)*y0*(y0+1.)/24.0
-    bM2=y0*(2.+y0*(-1.+y0*(-2.+y0)))/24.0
-    !bM =(2.-y0)*(y0-1.)*y0*(y0+2.)/6.0
-    bM =y0*(-4.+y0*(4.+y0*(1.-y0)))/6.0
-    !bP =((y0+1)-1.)*(y0+1)*((y0+1)+1.)*((y0+1)+2.)/24.0 
-    bP =y0*(6.+y0*(11+y0*(6+y0)))/24.0 
-    !b0 =((y0-2.)*(y0-1.)*(y0+1.)*(y0+2.)/4.0) + ((2.-y0)*y0*(y0+1.)*(y0+2.)/6.0) &
-    !        & + ((y0-1.)*y0*(y0+1.)*(y0+2.)/24.0) - bP
-    b0 = 1. - (bM2+bM+bP)
-
-    !cM =((yP-1.)-2.)*((yP-1.)-1.)*(yP-1.)*((yP-1.)+1.)/24.0
-    cM =yP*(-6.+yP*(11.+yP*(-6.+yP)))/24.0
-    !cP =(2.-yP)*yP*(yP+1.)*(yP+2.)/6.0
-    cP =yP*(4.+yP*(4.-yP*(1.+yP)))/6.0
-    !cP2=(yP-1.)*yP*(yP+1.)*(yP+2.)/24.0
-    cP2=yP*(-2.+yP*(-1.+yP*(2.+yP)))/24.0
-    !c0 =((yP-2.)*(yP-1.)*yP*(yP+1.)/24.0)+((2.-yP)*(yP-1.)*yP*(yP+2.)/6.0) &
-    !        & + ((yP-2.)*(yP-1.)*(yP+1.)*(yP+2.)/4.0) - cM
-    c0 = 1. - (cM+cP+cP2)
-
-    !eP =(yP2-2.)*(yP2-1.)*(yP2+1.)*(yP2+2.)/4.0
-    eP =1.+((yP2**2)*(-5+yP2**2)/4.0)
-    !eP2=(2.-yP2)*yP2*(yP2+1.)*(yP2+2.)/6.0
-    eP2=yP2*(4.+yP2*(4.-yP2*(1+yP2)))/6.0
-    !eP3=(yP2-1.)*yP2*(yP2+1.)*(yP2+2.)/24.0
-    eP3=yP2*(-2.+yP2*(-1.+yP2*(2+yP2)))/24.0
-    !e0 =((yP2-2.)*(yP2-1.)*yP2*(yP2+1.)/24.0) + ((2.-yP2)*(yP2-1.)*yP2*(yP2+2.)/6.0)
-    e0 = 1. - (eP+eP2+eP3)
-
-    ! remeshing
-    buffer(j0-3) = buffer(j0-3)   +aM3*scaM 
-    buffer(j0-2) = buffer(j0-2)   +aM2*scaM +bM2*sca
-    buffer(j0-1) = buffer(j0-1)   + aM*scaM + bM*sca  + cM*scaP
-    buffer(j0)   = buffer(j0)     + a0*scaM + b0*sca  + c0*scaP + e0*scaP2
-    buffer(j0+1) = buffer(j0+1)             + bP*sca  + cP*scaP + eP*scaP2
-    buffer(j0+2) = buffer(j0+2)                       +cP2*scaP +eP2*scaP2
-    buffer(j0+3) = buffer(j0+3)                                 +eP3*scaP2
-
-  end subroutine AC_remesh_O4_tag_CL
-
-
-  !> Corrected remeshing formula of order 3 for transition from Left block to a centered
-  !! block with a different indice (tagged particles). Use it for lambda 4 corrected scheme.
-  !!    @param[in]       posM_ad = adimensionned position of the first particle
-  !!    @param[in]       scaM    = scalar advected by the first particle
-  !!    @param[in]       pos_adim= adimensionned position of the second particle (the last of the first block)
-  !!    @param[in]       sca     = scalar advected by this particle
-  !!    @param[in]       posP_ad = adimensionned position of the third particle (wich is the first of the second block)
-  !!    @param[in]       scaP    = scalar advected by this particle
-  !!    @param[in]       posP2_ad= adimensionned position of the fourth (and last) particle
-  !!    @param[in]       scaP2   = scalar advected by this particle
-  !!    @param[in,out]   buffer  = temporaly remeshed scalar field
-  !! @details 
-  !!    Remeshing formula devoted to tagged particles.
-  !!    The particle group send into argument is composed of a block end and of the 
-  !!    begining of the next block. The first particles belong to a left block
-  !!    and the last to a centered one. The block have difference indice (tagged
-  !!    particles) and we have to use corrected formula.
-  subroutine AC_remesh_O4_tag_LC(posM_ad, scaM, pos_adim, sca, posP_ad, scaP, posP2_ad, scaP2, buffer)
-
-    ! Input/Output
-    real(WP), intent(in)                                        :: pos_adim, sca, posP_ad, scaP
-    real(WP), intent(in)                                        :: posM_ad, scaM, posP2_ad, scaP2
-    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
-    ! Other local variables
-    integer     :: jM, j0, jP, jP2          ! indice of the the nearest mesh points
-    ! (they depend on the block type)
-    real(WP)    :: aM3, aM2, aM, a0, aP,aP2 ! interpolation weight for the particles
-    real(WP)    :: bM2, bM, b0, bP, bP2,bP3 ! interpolation weight for the particles
-    real(WP)    :: cM, c0, cP, cP2, cP3,cP4 ! interpolation weight for the particles
-    real(WP)    :: e0, eP, eP2, eP3,eP4,ep5 ! interpolation weight for the particles
-    real(WP)    :: yM, y0, yP, yP2          ! adimensionned distance to mesh points for each particles
-
-
-    ! Indice of mesh point used in order to remesh
-    jM = floor(posM_ad)
-    j0 = floor(pos_adim)
-    jP = nint(posP_ad)
-    jP2= nint(posP2_ad)
-
-    ! Distance to mesh point
-    yM = (posM_ad  - float(jM))
-    y0 = (pos_adim - float(j0))
-    yP = (posP_ad  - float(jP))
-    yP2= (posP2_ad - float(jP2))
-
-    ! Interpolation weights
-    !aM3=(yM-2.)*(yM-1.)*yM*(yM+1.)/24.0
-    aM3=yM*(2.+yM*(-1.+yM*(-2.+yM)))/24.0
-    !aM2=(2.-yM)*(yM-1.)*yM*(yM+2.)/6.0
-    aM2 =yM*(-4.+yM*(4.+yM*(1.-yM)))/6.0
-    !aM =(yM-2.)*(yM-1.)*(yM+1.)*(yM+2.)/4.0
-    aM =(4.+(yM**2)*(-5.+yM**2))/4.0
-    !a0 =((2.-yM)*yM*(yM+1.)*(yM+2.)/6.0) 
-    a0 =yM*(4+yM*(4-yM*(1.+yM)))/6.0
-    !aP2=(((yM-1.)-1.)*(yM-1.)*((yM-1.)+1.)*((yM-1.)+2.)/24.0)
-    !aP2=yM*(yM-2.)*(yM-1.)*(yM+1.)/24.0
-    aP2=aM3
-    !aP =((yM-1.)*yM*(yM+1.)*(yM+2.)/24.0) - aP2
-    !aP = 1.0 - (aM3+aM2+aM+a0+aP2)
-    aP = 1.0 - (2.*aM3+aM2+aM+a0)
-
-    !bM2=(y0-2.)*(y0-1.)*y0*(y0+1.)/24.0
-    bM2=y0*(2.+y0*(-1.+y0*(-2.+y0)))/24.0
-    !bM =(2.-y0)*(y0-1.)*y0*(y0+2.)/6.0
-    bM =y0*(-4.+y0*(4.+y0*(1.-y0)))/6.0
-    !b0 =(y0-2.)*(y0-1.)*(y0+1.)*(y0+2.)/4.0
-    b0 =(4.+(y0**2)*(-5.+y0**2))/4.0
-    !bP2=(2.-(y0-1.))*(y0-1.)*((y0-1.)+1.)*((y0-1.)+2.)/6.0
-    !bP2=y0*(3.-y0)*(y0-1.)*(y0+1.)/6.0
-    bP2=y0*(-3.+y0*(1.+y0*(3.-y0)))/6.0
-    !bP3=((y0-1.)-1.)*(y0-1.)*((y0-1.)+1.)*((y0-1.)+2.)/24.0
-    !bP3=y0*(y0-2.)*(y0-1.)*(y0+1.)/24.0
-    bP3 = bM2
-    !bP =(2.-y0)*y0*(y0+1.)*(y0+2.)/6.0 + ((y0-1.)*y0*(y0+1.)*(y0+2.)/24.0) &
-    !       & - (bP2 + bP3)
-    !bP = 1.0 - (bM2 + bM + b0 + bP2 + bP3)
-    bP = 1.0 - (2*bM2 + bM + b0 + bP2)
-
-    !cM =((yP+1)-2.)*((yP+1)-1.)*(yP+1)*((yP+1)+1.)/24.0
-    cM =(yP-1.)*yP*(yP+1)*(yP+2.)/24.0
-    !cM =yP*(-2.+yP*(-1.+yP*(2.+yP)))/24.0
-    !c0 =(2.-(yP+1))*((yP+1)-1.)*(yP+1)*((yP+1)+2.)/6.0
-    !c0 =(1.-yP)*yP*(yP+1)*(yP+3.)/6.0
-    c0 =yP*(3.+yP*(1.-yP*(3.+yP)))/6.0
-    !cP2=(yP-2.)*(yP-1.)*(yP+1.)*(yP+2.)/4.0
-    cP2=(4.+(yP**2)*(-5.+yP**2))/4.0
-    !cP3=(2.-yP)*yP*(yP+1.)*(yP+2.)/6.0
-    cP3=yP*(4+yP*(4-yP*(1.+yP)))/6.0
-    !cP4=(yP-1.)*yP*(yP+1.)*(yP+2.)/24.0
-    cP4=cM
-    !cP =(yP-2.)*(yP-1.)*yP*(yP+1.)/24.0 + ((2.-yP)*(yP-1.)*yP*(yP+2.)/6.0) &
-    !        & - (cM + c0)
-    cP = 1.0 - (cM+c0+cP2+cP3+cP4)
-
-    !e0 =((yP2+1)-2.)*((yP2+1)-1.)*(yP2+1)*((yP2+1)+1.)/24.0
-    !e0 =(yP2-1.)*yP2*(yP2+1)*(yP2+2.)/24.0
-    e0 =yP2*(-2.+yP2*(-1.+yP2*(2.+yP2)))/24.0
-    !eP2=(2.-yP2)*(yP2-1.)*yP2*(yP2+2.)/6.0
-    eP2=yP2*(-4.+yP2*(4.+yP2*(1.-yP2)))/6.0
-    !eP3=(yP2-2.)*(yP2-1.)*(yP2+1.)*(yP2+2.)/4.0
-    eP3=(4.+(yP2**2)*(-5.+yP2**2))/4.0
-    !eP4=(2.-yP2)*yP2*(yP2+1.)*(yP2+2.)/6.0
-    eP4=yP2*(4+yP2*(4-yP2*(1.+yP2)))/6.0
-    !eP5=(yP2-1.)*yP2*(yP2+1.)*(yP2+2.)/24.0
-    eP5=e0
-    !eP =((yP2-2.)*(yP2-1.)*yP2*(yP2+1.)/24.0) - e0
-    eP = 1.0 - (e0+eP2+eP3+eP4+eP5)
-
-    ! remeshing
-    buffer(j0-3) = buffer(j0-3)   +aM3*scaM 
-    buffer(j0-2) = buffer(j0-2)   +aM2*scaM +bM2*sca
-    buffer(j0-1) = buffer(j0-1)   + aM*scaM + bM*sca  + cM*scaP
-    buffer(j0)   = buffer(j0)     + a0*scaM + b0*sca  + c0*scaP + e0*scaP2
-    buffer(j0+1) = buffer(j0+1)   + aP*scaM + bP*sca  + cP*scaP + eP*scaP2
-    buffer(j0+2) = buffer(j0+2)   +aP2*scaM +bP2*sca  +cP2*scaP +eP2*scaP2
-    buffer(j0+3) = buffer(j0+3)             +bP3*sca  +cP3*scaP +eP3*scaP2
-    buffer(j0+4) = buffer(j0+4)                       +cP4*scaP +eP4*scaP2
-    buffer(j0+5) = buffer(j0+5)                                 +eP5*scaP2
-
-  end subroutine AC_remesh_O4_tag_LC
-
-
-  !> M'6 remeshing formula (order is more than 2, JM Ethancelin is working on
-  !! determining order).
-  !!      @param[in]       pos_adim= adimensionned particle position
-  !!      @param[in]       sca     = scalar advected by the particle
-  !!      @param[in,out]   buffer  = temporaly remeshed scalar field
-  subroutine AC_remesh_Mprime6(pos_adim, sca, buffer)
-
-    !Input/Ouput
-    real(WP), intent(in)                                        :: pos_adim, sca
-    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
-    ! Ohter local variables
-    integer     :: j0                       ! indice of the the nearest mesh points
-    real(WP)    :: bM, bM2, b0, bP, bP2, bP3! interpolation weight for the particles
-    real(WP)    :: yM2, y0, yP, yP2     ! adimensionned distance to mesh points
-
-    ! Mesh point used in remeshing formula
-    j0 = floor(pos_adim)
-
-    ! Distance to mesh points
-    y0 = (pos_adim - float(j0))
-    !yM = y0+1
-    yP = y0+1
-    yM2 = y0-2
-    yP2 = y0+2
-
-    ! Interpolation weights
-    !bM2 =-(((y0+2.)-2)*(5.*(y0+2.)-8.)*((y0+2.)-3.)**3)/24.
-    bM2 = y0*(2. + y0*(-1. + y0*(-9. + (13. - 5.*y0)*y0)))/24.
-    !bM  =(y0+1.-1.)*(y0+1.-2.)*(25.*(y0+1.)**3-114.*(y0+1.)**2+153.*(y0+1.)-48.)/24.
-    bM = y0*(-16. + y0*(16. + y0*(39. + y0*(-64. + 25.*y0))))/24.
-    !bP  =-((1.-y0)-1.)*(25.*(1.-y0)**4-38.*(1.-y0)**3-3.*(1.-y0)**2+12.*(1.-y0)+12)/12.
-    bP = ( y0*(8. + y0*(8. + y0*(33. + y0*(-62. + 25.*y0)))))/12.
-    !bP2 = ((2.-y0)-1.)*((2.-y0)-2.)*(25.*(2.-y0)**3-114.*(2.-y0)**2+153.*(2.-y0)-48.)/24.
-    bP2 = (y0*(-2. + y0*(-1. + y0*(-33. + (61. - 25.*y0)*y0))))/24.
-    !bP3 =-(((3.-y0)-2)*(5.*(3.-y0)-8.)*((3.-y0)-3.)**3)/24.
-    bP3 = (y0**3)*(7. + y0*(5.*y0 - 12.))/24.
-    !b0  =-(y0-1.)*(25.*y0**4-38.*y0**3-3.*y0**2+12.*y0+12)/12.
-    !b0 = (12. + y0**2*(-15. + y0*(-35. + (63. - 25.*y0)*y0)))/12.
-    b0 = 1. - (bM2+bM+bP+bP2+bP3)
-
-    ! remeshing
-    buffer(j0-2) = buffer(j0-2)   + sca*bM2
-    buffer(j0-1) = buffer(j0-1)   + sca*bM
-    buffer(j0)   = buffer(j0)     + sca*b0
-    buffer(j0+1) = buffer(j0+1)   + sca*bP
-    buffer(j0+2) = buffer(j0+2)   + sca*bP2
-    buffer(j0+3) = buffer(j0+3)   + sca*bP3
-
-  end subroutine AC_remesh_Mprime6
-
-end module advec_common
-!> @}
diff --git a/HySoP/src/scalesInterface/particles/advec_common_group.F90 b/HySoP/src/scalesInterface/particles/advec_common_group.f90
similarity index 97%
rename from HySoP/src/scalesInterface/particles/advec_common_group.F90
rename to HySoP/src/scalesInterface/particles/advec_common_group.f90
index 40773512f..138ddf851 100644
--- a/HySoP/src/scalesInterface/particles/advec_common_group.F90
+++ b/HySoP/src/scalesInterface/particles/advec_common_group.f90
@@ -231,13 +231,13 @@ subroutine AC_velocity_interpol_group(dt, direction, gs, ind_group, p_pos_adim,
             ! Tag = concatenation of (rank+1), ind_group(1), ind_group(2), direction et unique Id.                          
             tag = compute_tag(ind_group, tag_velo_range, direction, proc_gap)
             ! Send message
-#ifdef PART_DEBUG
-            if(com_size>max_size) then
-                print*, 'rank = ', cart_rank, ' -- bug sur taille cartography a envoyer'
-                print*, 'taille carto = ', com_size, ' plus grand que la taille théorique ', &
-                    & max_size, ' et carto = ', cartography(:,proc_gap)
-            end if
-#endif
+!!$#ifdef PART_DEBUG
+!!$            if(com_size>max_size) then
+!!$                print*, 'rank = ', cart_rank, ' -- bug sur taille cartography a envoyer'
+!!$                print*, 'taille carto = ', com_size, ' plus grand que la taille théorique ', &
+!!$                    & max_size, ' et carto = ', cartography(:,proc_gap)
+!!$            end if
+!!$#endif
             call mpi_ISsend(cartography(1,proc_gap), com_size, MPI_INTEGER, rece_rank(proc_gap), tag, &
                 & D_comm(direction), s_request_bis(proc_gap),ierr)
         end if
@@ -280,13 +280,13 @@ subroutine AC_velocity_interpol_group(dt, direction, gs, ind_group, p_pos_adim,
                 ind_for_i1 = ind_for_i1 + send_carto(2+i2)
             end do
             ! IIa_bis - check correctness
-#ifdef PART_DEBUG
-            if(com_size/=send_carto(1)) then
-                print*, 'rank = ', cart_rank, ' -- bug sur taille champ de vitesse a envoyer'
-                print*, 'taille carto = ', com_size, ' plus grand recu ', &
-                    & send_carto(1), ' et carto = ', send_carto(:)
-            end if
-#endif
+!!$#ifdef PART_DEBUG
+!!$            if(com_size/=send_carto(1)) then
+!!$                print*, 'rank = ', cart_rank, ' -- bug sur taille champ de vitesse a envoyer'
+!!$                print*, 'taille carto = ', com_size, ' plus grand recu ', &
+!!$                    & send_carto(1), ' et carto = ', send_carto(:)
+!!$            end if
+!!$#endif
             ! IIb - Compute send tag
             tag = compute_tag(ind_group, tag_velo_V, direction, proc_gap)
             ! IIc - Send message
@@ -1165,20 +1165,20 @@ subroutine AC_obtain_senders_group(direction, gp_s, ind_group, proc_min, proc_ma
         end do
     end do
 
-#ifdef PART_DEBUG
-    do proc_gap = proc_min_abs, proc_max_abs
-        if (first(1,proc_gap)>max_size) then
-            print*, 'too big array on proc = ', cart_rank, ' - proc_gap = ', proc_gap
-            print*, 'it occurs on AC_obtain_senders_group - array concerned : "first"'
-            print*, 'first = ', first(1,proc_gap)
-        end if
-        if (last(1,proc_gap)>max_size) then
-            print*, 'too big array on proc = ', cart_rank, ' - proc_gap = ', proc_gap
-            print*, 'it occurs on AC_obtain_senders_group - array concerned : "last"'
-            print*, 'last = ', last(1,proc_gap)
-        end if
-    end do
-#endif
+!!$#ifdef PART_DEBUG
+!!$    do proc_gap = proc_min_abs, proc_max_abs
+!!$        if (first(1,proc_gap)>max_size) then
+!!$            print*, 'too big array on proc = ', cart_rank, ' - proc_gap = ', proc_gap
+!!$            print*, 'it occurs on AC_obtain_senders_group - array concerned : "first"'
+!!$            print*, 'first = ', first(1,proc_gap)
+!!$        end if
+!!$        if (last(1,proc_gap)>max_size) then
+!!$            print*, 'too big array on proc = ', cart_rank, ' - proc_gap = ', proc_gap
+!!$            print*, 'it occurs on AC_obtain_senders_group - array concerned : "last"'
+!!$            print*, 'last = ', last(1,proc_gap)
+!!$        end if
+!!$    end do
+!!$#endif
     
     ! -- Send information if I am the first or the last --
     do proc_gap = proc_min_abs, proc_max_abs
@@ -1378,21 +1378,21 @@ subroutine AC_obtain_senders_com(direction, gp_s, ind_group, proc_min, proc_max,
         end do
     end do
 
-#ifdef PART_DEBUG
-    ! -- Check the size of fist and last --
-    do proc_gap = proc_min_abs, proc_max_abs
-        if (first(1,proc_gap)>com_size) then
-            print*, 'too big array on proc = ', cart_rank, ' - proc_gap = ', proc_gap
-            print*, 'it occurs on AC_obtain_senders_group - array concerned : "first"'
-            print*, 'first = ', first(:, proc_gap)
-        end if
-        if (last(1,proc_gap)>com_size) then
-            print*, 'too big array on proc = ', cart_rank, ' - proc_gap = ', proc_gap
-            print*, 'it occurs on AC_obtain_senders_group - array concerned : "last"'
-            print*, 'last = ', last(:,proc_gap)
-        end if
-    end do
-#endif
+!!$#ifdef PART_DEBUG
+!!$    ! -- Check the size of fist and last --
+!!$    do proc_gap = proc_min_abs, proc_max_abs
+!!$        if (first(1,proc_gap)>com_size) then
+!!$            print*, 'too big array on proc = ', cart_rank, ' - proc_gap = ', proc_gap
+!!$            print*, 'it occurs on AC_obtain_senders_group - array concerned : "first"'
+!!$            print*, 'first = ', first(:, proc_gap)
+!!$        end if
+!!$        if (last(1,proc_gap)>com_size) then
+!!$            print*, 'too big array on proc = ', cart_rank, ' - proc_gap = ', proc_gap
+!!$            print*, 'it occurs on AC_obtain_senders_group - array concerned : "last"'
+!!$            print*, 'last = ', last(:,proc_gap)
+!!$        end if
+!!$    end do
+!!$#endif
     
     ! -- Send information if I am the first or the last --
     do proc_gap = proc_min_abs, proc_max_abs
-- 
GitLab