From ce05ef06277ad7daf4fdaa02ac4c6810852e1b38 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Franck=20P=C3=A9rignon?= <franck.perignon@imag.fr>
Date: Thu, 7 Jun 2012 07:09:14 +0000
Subject: [PATCH] Scales adapted to scales2py

---
 HySoP/src/Unstable/LEGI/CMake                 |   1 +
 HySoP/src/Unstable/LEGI/CMakeLists.txt        | 207 +++++
 .../LEGI/src/layout/cart_topology.f90         | 863 +++++++++---------
 .../src/Unstable/LEGI/src/particles/advec.f90 | 138 +--
 .../Unstable/LEGI/src/particles/advecX.f90    | 791 ++++++++--------
 .../Unstable/LEGI/src/particles/advecY.f90    | 666 +++++++-------
 .../Unstable/LEGI/src/particles/advecZ.f90    | 696 +++++++-------
 .../LEGI/src/particles/advec_common.f90       | 179 +---
 .../LEGI/src/particles/advec_common_line.f90  | 491 +++++-----
 .../LEGI/src/particles/advec_variables.f90    | 218 ++---
 HySoP/src/Unstable/LEGI/test/CMakeLists.txt   |   1 -
 11 files changed, 2147 insertions(+), 2104 deletions(-)
 create mode 120000 HySoP/src/Unstable/LEGI/CMake
 create mode 100644 HySoP/src/Unstable/LEGI/CMakeLists.txt
 delete mode 100644 HySoP/src/Unstable/LEGI/test/CMakeLists.txt

diff --git a/HySoP/src/Unstable/LEGI/CMake b/HySoP/src/Unstable/LEGI/CMake
new file mode 120000
index 000000000..f988a93ec
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/CMake
@@ -0,0 +1 @@
+../../../CMake
\ No newline at end of file
diff --git a/HySoP/src/Unstable/LEGI/CMakeLists.txt b/HySoP/src/Unstable/LEGI/CMakeLists.txt
new file mode 100644
index 000000000..302d6cd4a
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/CMakeLists.txt
@@ -0,0 +1,207 @@
+#=======================================================
+# cmake utility to compile and install JB library
+#
+# F. Pérignon, june 2012
+#
+#=======================================================
+
+# ============= Global cmake Settings ============= 
+# Set minimum version for cmake
+cmake_minimum_required(VERSION 2.8.7)
+# Set cmake modules directory (i.e. the one which contains all user-defined FindXXX.cmake files among other things)
+set(CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/CMake)
+# Force out-of-source build
+include(OutOfSourceBuild)
+# Some useful macros
+include(MyTools)
+
+# User defined options
+option(VERBOSE_MODE "enable verbose mode for cmake exec. Default = on" ON)
+option(USE_MPI "compile and link parmes with mpi when this mode is enabled. Default = on." ON)
+option(DOUBLEPREC "set precision for real numbers to double precision when this mode is enabled. Default = on." ON)
+option(WITH_TESTS "Enable testing. Default = on" OFF)
+option(BUILD_SHARED_LIBS "Enable dynamic library build, default = ON" ON)
+
+# cmake project name
+set(PROJECT_NAME parmeslegi)
+# --- Name for the package ---
+# This name will be used to install parmeslegi (library, headers, ...) and when another lib or soft will need to search for parmeslegi.
+set(PACKAGE_NAME "parmeslegi")
+# --- Set a version number for the package ---
+set(${PACKAGE_NAME}_version 1.0.0)
+# --- The name (without extension) of the lib to be created ---
+set(PROJECT_LIBRARY_NAME ${PROJECT_NAME})
+# --- The name of the exe to be created (test purpose)
+# This exe will be linked with libPROJECT_LIBRARY_NAME 
+set(EXE_NAME ${PROJECT_NAME}Run)
+# The list of all dirs containing sources to be compiled for the parmeslegi lib
+# Any file in those dirs will be used to create libparmes
+set(${PROJECT_LIBRARY_NAME}_SRCDIRS 
+  src
+  src/Layout
+  src/particles
+  src/output
+  )
+# A main file to create an executable (test purpose)
+# Any files in these dirs will be used to create parmeslegi exec (linked with libparmes)
+set(${EXE_NAME}_SRCDIRS test/ test/src/ test/src/Test_advec
+  test/src/Test_io test/src/Test_topo)
+# Matching expr for files to be compiled. 
+set(EXTS *.f90 *.F90)
+# Matching expr for headers (install purpose)
+set(EXTS_HDRS *.hpp *.h)
+#
+# ============= The project =============
+# Set project name and project languages 
+# => this automatically defines:
+#   - ${PROJECT_NAME}_BINARY_DIR : where you have run cmake, i.e. the place for compilation
+#   - ${PROJECT_NAME}_SOURCE_DIR : where sources (.f and .h and this CMakeLists.txt) are located
+# Note that because of OutOfSourceBuild, binary_dir and source_dir must be different. 
+
+project(${PROJECT_NAME} Fortran)
+
+# ============= Tests =============
+if(WITH_TESTS)
+  enable_testing()
+endif(WITH_TESTS)
+
+# ============= Search for libraries  =============
+# We search for libraries parmeslegi depends on and 
+# set the compile/link conf (-I and -L opt)
+
+# --- MPI ---
+if(USE_MPI)
+  # Find MPI for C++ and fortran.
+  find_package(MPI REQUIRED)
+  # -I
+  include_directories(${MPI_Fortran_INCLUDE_PATH})
+  # Add compilation flags
+  append_Fortran_flags(${MPI_Fortran_COMPILE_FLAGS})
+  set(${PROJECT_NAME}_LINK_FLAGS ${${PROJECT_NAME}_LINK_FLAGS} ${MPI_Fortran_LINK_FLAGS})
+  #endif(MPI_Fortran_COMPILER)
+  set(LIBS ${LIBS} ${MPI_Fortran_LIBRARIES} )
+endif()
+
+# ============= Prepare compilation =============
+
+# Force a default build type if not provided by user
+# CMAKE_BUILD_TYPE = empty, Debug, Release, RelWithDebInfo or MinSizeRel.
+if (NOT CMAKE_BUILD_TYPE)
+  set (CMAKE_BUILD_TYPE RELEASE CACHE STRING "Choose the type of build, options are: None, Debug, Release, RelWithDebInfo or MinSizeRel." FORCE)  
+endif (NOT CMAKE_BUILD_TYPE)
+
+# Set module files directory (i.e. where .mod will be created) 
+set(CMAKE_Fortran_MODULE_DIRECTORY ${CMAKE_BINARY_DIR}/Modules)
+#  Add compilation flags:
+append_Fortran_FLAGS("-Wall")
+
+# ============= Source and header files list =============
+# We scan all files with matching extension in directories 
+# containing sources.
+
+# Source and header files list:
+foreach(_DIR ${${PROJECT_LIBRARY_NAME}_SRCDIRS})
+  set(_DIR_FILES)
+  foreach(_EXT ${EXTS}) # Source files
+    file(GLOB _DIR_FILES_EXT ${_DIR}/${_EXT})
+    if(_DIR_FILES_EXT)
+      list(APPEND ${PROJECT_LIBRARY_NAME}_SRC ${_DIR_FILES_EXT})
+    endif()
+  endforeach()
+  foreach(_EXT ${EXTS_HDRS}) # Headers
+    file(GLOB _DIR_FILES_EXT ${_DIR}/${_EXT})
+    if(_DIR_FILES_EXT)
+      list(APPEND ${PROJECT_LIBRARY_NAME}_HDRS ${_DIR_FILES_EXT})
+    endif()
+  endforeach()
+endforeach()
+# We add headers to source files
+list(APPEND ${PROJECT_LIBRARY_NAME}_SRC ${${PROJECT_LIBRARY_NAME}_HDRS})
+
+# The same for main dir ...
+foreach(_DIR ${${EXE_NAME}_SRCDIRS})
+  set(_DIR_FILES)
+  foreach(_EXT ${EXTS})
+    file(GLOB _DIR_FILES_EXT ${_DIR}/${_EXT})
+    if(_DIR_FILES_EXT)
+      list(APPEND ${EXE_NAME}_SRC ${_DIR_FILES_EXT})
+    endif()
+  endforeach()
+  foreach(_EXT ${EXTS_HDRS}) 
+    file(GLOB _DIR_FILES_EXT ${_DIR}/${_EXT})
+    if(_DIR_FILES_EXT)
+      list(APPEND ${EXE_NAME}_HDRS ${_DIR_FILES_EXT})
+    endif()
+  endforeach()
+endforeach()
+list(APPEND ${EXE_NAME}_SRC ${${EXE_NAME}_HDRS})
+
+# Add directories to those searched by compiler ...
+# -I
+include_directories(${${PROJECT_LIBRARY_NAME}_SRCDIRS})
+include_directories(${${EXE_NAME}_HDRS})
+include_directories(${CMAKE_Fortran_MODULE_DIRECTORY})
+
+# ============= Creates the library =============
+if(BUILD_SHARED_LIBS) # shared library
+  add_library(${PROJECT_LIBRARY_NAME} SHARED ${${PROJECT_LIBRARY_NAME}_SRC})
+else() # static library
+  add_library(${PROJECT_LIBRARY_NAME} STATIC ${${PROJECT_LIBRARY_NAME}_SRC})
+endif()
+# Libs to link with PROJECT__LIBRARY_NAME
+target_link_libraries(${PROJECT_LIBRARY_NAME} ${LIBS})
+
+# ============= Creates the executable =============
+add_executable(${EXE_NAME} ${${EXE_NAME}_SRC})
+add_dependencies(${EXE_NAME} ${PROJECT_LIBRARY_NAME})
+
+# libs to link with EXE_NAME
+target_link_libraries(${EXE_NAME} ${PROJECT_LIBRARY_NAME})
+target_link_libraries(${EXE_NAME} ${LIBS})
+
+# ============== Add tests ==============
+if(WITH_TESTS)
+  message(STATUS "Enable testing ...")
+  begin_test(src/tests/F2003)
+  new_test(testAllocatedPtr userMod.f90 wrapper.f90 testAllocatedPtr.cxx)
+  new_test(testNullPtr userMod.f90 wrapper.f90 testNullPtr.cxx)
+  end_test()
+endif(WITH_TESTS)
+
+# ============= Prepare install =============
+
+# The library
+# The library, the headers and mod files, the cmake generated files
+# will be install in CMAKE_INSTALL_PREFIX/lib include and share
+#include(InstallPackage)
+
+#install_package(${PACKAGE_NAME} ${PROJECT_LIBRARY_NAME} ${${PROJECT_NAME}_HDRS})
+
+#install(TARGETS ${EXE_NAME}  
+#RUNTIME DESTINATION bin  # executables
+#  )
+
+# ============= RPATH =============
+# Concerning rpath see for example http://www.itk.org/Wiki/CMake_RPATH_handling
+
+# --------------------------------------------
+# do not skip the full RPATH for the build tree
+set(CMAKE_SKIP_BUILD_RPATH  FALSE)
+# when building, don't use the install RPATH already
+# (but later on when installing)
+set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE) 
+# the RPATH to be used when installing
+set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib")
+# add the automatically determined parts of the RPATH
+# which point to directories outside the build tree to the install RPATH
+set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
+
+# ============= Summary =============
+if(VERBOSE_MODE)
+  message(STATUS "====================== Summary ======================")
+  message(STATUS " Compiler : ${CMAKE_Fortran_COMPILER}")
+  message(STATUS " Sources are in : ${CMAKE_SOURCE_DIR}")
+  message(STATUS " Project uses MPI : ${USE_MPI}")
+  message(STATUS " Project will be installed in ${CMAKE_INSTALL_PREFIX}")
+  message(STATUS "====================== ======= ======================")
+endif()
diff --git a/HySoP/src/Unstable/LEGI/src/layout/cart_topology.f90 b/HySoP/src/Unstable/LEGI/src/layout/cart_topology.f90
index b3b60fdd2..3bf33e570 100644
--- a/HySoP/src/Unstable/LEGI/src/layout/cart_topology.f90
+++ b/HySoP/src/Unstable/LEGI/src/layout/cart_topology.f90
@@ -35,109 +35,106 @@
 
 module cart_topology
 
-    use precision
-    use mpi, only : MPI_COMM_WORLD,MPI_TAG_UB
-
-    implicit none
-
-    ! ===== Public variables =====
-
-    ! ----- Communicators -----
-    !> Communicator associated with the cartesian topology
-    integer, protected                  :: cart_comm        
-    !> Communicators devoted to 1-dimensionnal subgrids (along Y and Z)
-    integer, protected                  :: X_comm, Y_comm, Z_comm   
-    !> Table of the previous communicators (ie comm devoted to 1D subgrids)
-    integer, dimension(3), protected    :: D_comm
-    !> Rank of immediate neighbors
-    integer, dimension(3,2), protected  :: neighbors
-
-    ! ----- Information about mesh subdivision and on the global grid -----
-    !> number of processes in each direction
-    integer, dimension(3), protected    :: nb_proc_dim
-    !> information about min and max local indice on the current directory
-    integer, dimension(3), protected    :: begin_proc, end_proc     
-    !> space lengh of th domain
-    real(WP), dimension(3), protected   :: length
-    !> space step for scalar discretisation
-    real(WP), dimension(3), protected   :: d_sc                     
-    !> number of (sub)grid in each direction
-    integer, dimension(3), protected    :: N
-    integer, dimension(3), protected    :: N_proc
-    !> rank of current processus (in the cartesian communicator)
-    integer, public                     :: cart_rank
-    !> rank of current processus (in the in communicator associated to the different direction)
-    integer, dimension(3), public       :: D_rank
-    !> coordinate of the current processus
-    integer, dimension(3), protected    :: coord
-    !> YZ coordinate of the current processus
-    integer, dimension(2), protected    :: coordYZ
-    !> Periodic boundary conditions: logical array, equals true if periodic
-    logical, dimension(3),protected     :: periods                      
-    !> Computation are done by group of line. Here we define their size
-    integer, dimension(3,2), protected  :: group_size
-    !> To check if group size is initialized
-    logical, private                    :: group_init = .false.
-    !> To concatenate position in order to create unique mpi message tag
-    integer, dimension(3,2), private    :: tag_size
-    !> To concatenate rank in order to create unique mpi message tag
-    integer, private                    :: tag_rank
-    !> To check if mesh is already initialized
-    logical, private                    :: mesh_init = .false.
-    !> Default mesh resolution
-    integer, parameter                  :: default_size = 80
-
-    ! ==== Public procedures ==== 
-    ! Creation of the cartesian topology
-    public      :: cart_create
-    ! Initialise mesh information (first part)
-    public      :: discretisation_create
-    public      :: discretisation_default
-    ! Compute tag for mpi message
-    public      :: compute_tag
-    private     :: compute_tag_gap
-    private     :: compute_tag_NP
-    ! Adjust some private variale
-    public      :: set_group_size
-    private     :: set_group_size_1
-    private     :: set_group_size_1x2
-    private     :: set_group_size_3
-    private     :: set_group_size_init
-
-    ! ==== Public procedures ==== 
-    ! Initialise mesh information (second part)
-    private     :: discretisation_init
-
-    interface compute_tag
-        module procedure compute_tag_gap, compute_tag_NP
-    end interface compute_tag
-
-    interface set_group_size
-    !>    Size of group of line is used to gather line together in the particle
-    !! solver. As it is a crucial parameter, it must be possible for user to changed
-    !! it without broke everything (ie if user value is bad it has to be ignored) and
-    !! to be set up by default to a "right and intelligent" value. Use
-    !! set_group_size for this purpose. An optional logical argument "init" is
-    !! used to set up group_size to a default "acceptable" value if user value
-    !! is not acceptable considering mesh size (ie will create bugs).
-        module procedure set_group_size_1, set_group_size_1x2, set_group_size_3, &
-            & set_group_size_init
-    end interface set_group_size
+  use precision
+  use mpi, only : MPI_COMM_WORLD,MPI_TAG_UB
+
+  implicit none
+  
+  ! ----- Communicators -----
+  !> Communicator associated with the cartesian topology
+  integer, protected                  :: cart_comm        
+  !> Communicators devoted to 1-dimensional subgrids (along Y and Z)
+  integer, protected                  :: X_comm, Y_comm, Z_comm   
+  !> Table of the previous communicators (ie comm devoted to 1D subgrids)
+  integer, dimension(3), protected    :: D_comm
+  !> Rank of immediate neighbors
+  integer, dimension(3,2), protected  :: neighbors
+  ! ----- Information about mesh subdivision and on the global grid -----
+  !> number of processes in each direction
+  integer, dimension(3), protected    :: nb_proc_dim
+  !> min and max local indices of the current process in each direction
+  integer, dimension(3), protected    :: begin_proc, end_proc     
+  !> space length of the domain
+  real(WP), dimension(3), protected   :: length
+  !> space step for scalar discretisation
+  real(WP), dimension(3), protected   :: d_sc                     
+  !> number of (sub)grid in each direction
+  integer, dimension(3), protected    :: N
+  integer, dimension(3), protected    :: N_proc
+  !> rank of current processus (in the cartesian communicator)
+  integer, public                     :: cart_rank
+  !> rank of the current process (in the communicators associated to the different directions)
+  integer, dimension(3), public       :: D_rank
+  !> coordinate of the current processus
+  integer, dimension(3), protected    :: coord
+  !> YZ coordinate of the current processus
+  integer, dimension(2), protected    :: coordYZ
+  !> Periodic boundary conditions: logical array, equals true if periodic
+  logical, dimension(3),protected     :: periods                      
+  !> Computation are done by group of line. Here we define their size
+  integer, dimension(3,2), protected  :: group_size
+  !> To check if group size is initialized
+  logical, private                    :: group_init = .false.
+  !> To concatenate position in order to create unique mpi message tag
+  integer, dimension(3,2), private    :: tag_size
+  !> To concatenate rank in order to create unique mpi message tag
+  integer, private                    :: tag_rank
+  !> To check if mesh is already initialized
+  logical, private                    :: mesh_init = .false.
+  !> Default mesh resolution
+  integer, parameter                  :: default_size = 80
+
+  ! ==== Public procedures ==== 
+  ! Creation of the cartesian topology
+  public cart_create
+  ! Initialise mesh information (first part)
+  public      :: discretisation_create
+  public      :: discretisation_default
+  ! Compute tag for mpi message
+  public      :: compute_tag
+  private     :: compute_tag_gap
+  private     :: compute_tag_NP
+  ! Adjust some private variables
+  public      :: set_group_size
+  private     :: set_group_size_1
+  private     :: set_group_size_1x2
+  private     :: set_group_size_3
+  private     :: set_group_size_init
+
+  ! ==== Public procedures ==== 
+  ! Initialise mesh information (second part)
+  private     :: discretisation_init
+
+  interface compute_tag
+     module procedure compute_tag_gap, compute_tag_NP
+  end interface compute_tag
+
+  interface set_group_size
+     !>    Size of group of line is used to gather line together in the particle
+     !! solver. As it is a crucial parameter, it must be possible for the user to
+     !! change it without breaking everything (ie if user value is bad it has to be ignored) and
+     !! to be set up by default to a "right and intelligent" value. Use
+     !! set_group_size for this purpose. An optional logical argument "init" is
+     !! used to set up group_size to a default "acceptable" value if user value
+     !! is not acceptable considering mesh size (ie will create bugs).
+     module procedure set_group_size_1, set_group_size_1x2, set_group_size_3, &
+          & set_group_size_init
+  end interface set_group_size
 
 contains
 
-!> Creation of the cartesian mpi topology and (if needed) of all communicators
-!! used for particles method.
-!!    @param[in]    dims        = array specifying the number of processes in each dimension
-!!    @param[out]   ierr        = error code
-!!    @param[out]   spec_comm   = mpi communicator used by the spectral part of the code (optional).
-!!    @param[in]    topology    = to choose the dimension of the mpi topology (if 0 then none) (optional).
-!! @details 
-!!        This subroutine initialzed the mpi topologic and returns the communicator
-!!    that will be used for all the spectral part of the code (ie everything except
-!!    the particles part). If needed, it also initialzed all the mpi context
-!!    used by the particles solver.
-subroutine cart_create(dims, ierr, spec_comm, topology)
+  !> Creation of the cartesian mpi topology and (if needed) of all communicators
+  !! used for particles method.
+  !!    @param[in]    dims        = array specifying the number of processes in each dimension
+  !!    @param[out]   ierr        = error code
+  !!    @param[out]   spec_comm   = mpi communicator used by the spectral part of the code (optional).
+  !!    @param[in]    topology    = to choose the dimension of the mpi topology (if 0 then none) (optional).
+  !! @details 
+  !!        This subroutine initialzed the mpi topologic and returns the communicator
+  !!    that will be used for all the spectral part of the code (ie everything except
+  !!    the particles part). If needed, it also initialzed all the mpi context
+  !!    used by the particles solver.
+  subroutine cart_create(dims, ierr, spec_comm, topology)
 
     ! Input/Output
     integer, dimension(:), intent(in)   :: dims
@@ -147,7 +144,7 @@ subroutine cart_create(dims, ierr, spec_comm, topology)
     ! Other lspec_comm        ocal variables
     logical                 :: reorganisation                   ! to choose to reordered or not the processus rank.
     logical, dimension(3)   :: remains_dim                      ! use to create 1D-subdivision : remains_dims(i) equal
-                                                                ! true if the i-th dimension is kept in the subgrid.
+    ! true if the i-th dimension is kept in the subgrid.
     integer                 :: direction                        ! current direction : 1 = along X, 2 = along Y and 3 = alongZ
     integer                 :: topology_dim=3                   ! recopy of the optional input "topology".
     integer                 :: key                              ! to re-order processus in spec_comm
@@ -157,150 +154,144 @@ subroutine cart_create(dims, ierr, spec_comm, topology)
     ! If there is some scalar to advec with particles method, then initialized
     ! the 2D mpi topology
     if (present(topology))  then
-        select case (topology)
-            case(0)
-                topology_dim = 0
-            case(1)
-                topology_dim = 1
-            case default
-                topology_dim = 3
-        end select
+       select case (topology)
+       case(0)
+          topology_dim = 0
+       case(1)
+          topology_dim = 1
+       case default
+          topology_dim = 3
+       end select
     end if
 
     select case (topology_dim)
     case (3)
-        ! ===== Create a 2D mpi topology =====
-        ! 2D topology is created and mpi context is initialized for both
-        ! spectral and particles code
-
-        ! --- Creation of the cartesian topology ---
-        reorganisation = .true.
-        periods = .true.
-        if (size(dims)==2) then 
-            nb_proc_dim = (/ 1, dims(1), dims(2) /)
-        else if (size(dims)==3) then
-            nb_proc_dim = dims
-            if (nb_proc_dim(1)/=1) then
-                call mpi_comm_rank(MPI_COMM_WORLD, cart_rank, ierr)
-                if (cart_rank==0) write(*,'(a)') ' XXXXXXXXXX Warning: subdision along X XXXXXXXXXX'
-            end if
-        else
-            call mpi_comm_rank(MPI_COMM_WORLD, cart_rank, ierr)
-            if (cart_rank==0) then
-                write(*,'(a)') ' XXXXXXXXXX Error - wrong nb of processus XXXXXXXXXX'
-                write(*,'(a,10(x,i0))') ' input argument dims =', dims
-            end if
-            stop
-        end if
-
-        call mpi_cart_create(MPI_COMM_WORLD, 3, nb_proc_dim, periods, reorganisation, &
-                & cart_comm, ierr)
-
-        ! --- Create 1D communicator ---
-        ! Subdivision in 1D-subgrids and creation of communicator devoted to
-        ! 1D-communication
-        ! Communication along X-axis 
-        remains_dim = (/.true., .false., .false. /)
-        call mpi_cart_sub(cart_comm, remains_dim, X_comm, ierr)
-        D_comm(1) = X_comm
-        ! Communication along Y-axis (in the 3D mesh, ie the x-axis on the mpi-topology)
-        remains_dim = (/.false., .true., .false. /)
-        call mpi_cart_sub(cart_comm, remains_dim, Y_comm, ierr)
-        D_comm(2) = Y_comm
-        ! Communication along Z-axis 
-        remains_dim = (/ .false., .false., .true. /)
-        call mpi_cart_sub(cart_comm, remains_dim, Z_comm, ierr)
-        D_comm(3) = Z_comm
-
-        ! --- Initialise information about the current processus ---
-        call mpi_comm_rank(cart_comm, cart_rank, ierr)
-        do direction = 1, 3
-            call mpi_comm_rank(D_comm(direction), D_rank(direction), ierr)
-            call mpi_cart_shift(D_comm(direction), 0, 1, neighbors(direction,1), neighbors(direction,2), ierr)
-        end do
-        call mpi_cart_coords(cart_comm, cart_rank, 3, coord, ierr)
-        coordYZ = (/ coord(2), coord(3) /)
-
-        ! --- Spectral context ---
-        ! Initialized the communicator used on which the spectral part
-        ! will be based.
-        if (present(spec_comm)) then
-            !> Rank numerotation in spectral communicator grow along first
-            !! direction first and then along the second, the opposite of mpi 
-            !! rank numerotation. That is why processus are reoder and 2
-            !! communicator are created.
-            !! Example with 4 processus 
-            !! coord    // mpi-cart rank    // spec rank
-            !! (0,0,0)  // 0                // 0
-            !! (0,1,0)  // 2                // 1
-            !! (0,0,1)  // 1                // 2
-            !! (0,1,1)  // 3                // 3
-            ! Construct key to reoder
-            key = coord(1) + (coord(2) + coord(3)*nb_proc_dim(2))*nb_proc_dim(1)
-            ! As not split along X, it must be equivalent to "key = coord(2) + coord(3)*nb_proc_dim(2)"
-            ! Construct spectral communicator
-            call mpi_comm_split(cart_comm, 1, key, spec_comm, ierr)
-        end if
+       ! ===== Create a 2D mpi topology =====
+       ! 2D topology is created and mpi context is initialized for both
+       ! spectral and particles code
+
+       ! --- Creation of the cartesian topology ---
+       reorganisation = .true.
+       periods = .true.
+       if (size(dims)==2) then 
+          nb_proc_dim = (/ 1, dims(1), dims(2) /)
+       else if (size(dims)==3) then
+          nb_proc_dim = dims
+          if (nb_proc_dim(1)/=1) then
+             call mpi_comm_rank(MPI_COMM_WORLD, cart_rank, ierr)
+             if (cart_rank==0) write(*,'(a)') ' XXXXXXXXXX Warning: subdision along X XXXXXXXXXX'
+          end if
+       else
+          call mpi_comm_rank(MPI_COMM_WORLD, cart_rank, ierr)
+          if (cart_rank==0) then
+             write(*,'(a)') ' XXXXXXXXXX Error - wrong nb of processus XXXXXXXXXX'
+             write(*,'(a,10(x,i0))') ' input argument dims =', dims
+          end if
+          stop
+       end if
+
+       call mpi_cart_create(MPI_COMM_WORLD, 3, nb_proc_dim, periods, reorganisation, &
+            & cart_comm, ierr)
+
+       ! --- Create 1D communicator ---
+       ! Subdivision in 1D-subgrids and creation of communicator devoted to
+       ! 1D-communication
+       ! Communication along X-axis 
+       remains_dim = (/.true., .false., .false. /)
+       call mpi_cart_sub(cart_comm, remains_dim, X_comm, ierr)
+       D_comm(1) = X_comm
+       ! Communication along Y-axis (in the 3D mesh, ie the x-axis on the mpi-topology)
+       remains_dim = (/.false., .true., .false. /)
+       call mpi_cart_sub(cart_comm, remains_dim, Y_comm, ierr)
+       D_comm(2) = Y_comm
+       ! Communication along Z-axis 
+       remains_dim = (/ .false., .false., .true. /)
+       call mpi_cart_sub(cart_comm, remains_dim, Z_comm, ierr)
+       D_comm(3) = Z_comm
+
+       ! --- Initialise information about the current processus ---
+       call mpi_comm_rank(cart_comm, cart_rank, ierr)
+       do direction = 1, 3
+          call mpi_comm_rank(D_comm(direction), D_rank(direction), ierr)
+          call mpi_cart_shift(D_comm(direction), 0, 1, neighbors(direction,1), neighbors(direction,2), ierr)
+       end do
+       call mpi_cart_coords(cart_comm, cart_rank, 3, coord, ierr)
+       coordYZ = (/ coord(2), coord(3) /)
+
+       ! --- Spectral context ---
+       ! Initialized the communicator used on which the spectral part
+       ! will be based.
+       if (present(spec_comm)) then
+          !> Rank numerotation in spectral communicator grow along first
+          !! direction first and then along the second, the opposite of mpi 
+          !! rank numerotation. That is why processus are reoder and 2
+          !! communicator are created.
+          !! Example with 4 processus 
+          !! coord    // mpi-cart rank    // spec rank
+          !! (0,0,0)  // 0                // 0
+          !! (0,1,0)  // 2                // 1
+          !! (0,0,1)  // 1                // 2
+          !! (0,1,1)  // 3                // 3
+          ! Construct key to reoder
+          key = coord(1) + (coord(2) + coord(3)*nb_proc_dim(2))*nb_proc_dim(1)
+          ! As not split along X, it must be equivalent to "key = coord(2) + coord(3)*nb_proc_dim(2)"
+          ! Construct spectral communicator
+          call mpi_comm_split(cart_comm, 1, key, spec_comm, ierr)
+       end if
 
     case (1)
-            ! Construct 1D non-periodic mpi topology
-            nb_proc = product(nb_proc_dim)
-            call mpi_cart_create(MPI_COMM_WORLD, 1, nb_proc, period_1D, reorganisation, &
-                & cart_comm, ierr)
-            ! Use it as spectral communicator.
-            spec_comm = cart_comm
+       ! Construct 1D non-periodic mpi topology
+       nb_proc = product(nb_proc_dim)
+       call mpi_cart_create(MPI_COMM_WORLD, 1, nb_proc, period_1D, reorganisation, &
+            & cart_comm, ierr)
+       ! Use it as spectral communicator.
+       spec_comm = cart_comm
 
     case default
-        ! ===== Do not use mpi topology =====
-        if (present(spec_comm)) then
-            spec_comm = MPI_COMM_WORLD
-        end if
+       ! ===== Do not use mpi topology =====
+       if (present(spec_comm)) then
+          spec_comm = MPI_COMM_WORLD
+       end if
     end select
 
-    
+
     ! Print some minimal information about the topology
     if (cart_rank == 0) then
-        write(*,'(a)') ''
-        write(*,'(6x,a)') '========== Topology used ========='
-        if (topology_dim == 0) then
-            write(*,'(6x,a)') 'No mpi topology'
-        else
-            write(*,'(6x,i0,a)') topology_dim,'D mpi topology'
-        end if
-        write(*,'(6x,a,i0,x,i0,x,i0)') 'nb of proc along X, Y, Z = ', nb_proc_dim
-        write(*,'(6x,a)') '=================================='
-        write(*,'(a)') ''
+       write(*,'(a)') ''
+       write(*,'(6x,a)') '========== Topology used ========='
+       if (topology_dim == 0) then
+          write(*,'(6x,a)') 'No mpi topology'
+       else
+          write(*,'(6x,i0,a)') topology_dim,'D mpi topology'
+       end if
+       write(*,'(6x,a,i0,x,i0,x,i0)') 'nb of proc along X, Y, Z = ', nb_proc_dim
+       write(*,'(6x,a)') '=================================='
+       write(*,'(a)') ''
     end if
 
-end subroutine cart_create
-
-!> Create the mesh structure associated to the topology 
-!!    @param[in]    Nx  = number of meshes along X
-!!    @param[in]    Ny  = number of meshes along X
-!!    @param[in]    Nz  = number of meshes along X
-!!    @param[in]    Lx  = number of meshes along X
-!!    @param[in]    Ly  = number of meshes along Y
-!!    @param[in]    Lz  = number of meshes along Z
-!! @details
-!!    Initialise the mesh data associated to the mpi topology and used by the
-!!    particle solver
-!!    @author Jean-Baptiste Lagaert
-subroutine discretisation_create(Nx, Ny, Nz, Lx, Ly, Lz)
-
-    implicit none
+  end subroutine cart_create
+
+  !> Create the mesh structure associated to the topology 
+  !!    @param[in]    resolution   = number of mesh points in each direction
+  !!                                 (former scalar arguments Nx, Ny, Nz grouped
+  !!                                 in a size-3 array)
+  !!    @param[in]    domainLength = length of the domain in each direction
+  !!                                 (former scalar arguments Lx, Ly, Lz grouped
+  !!                                 in a size-3 array)
+  !! @details
+  !!    Initialise the mesh data associated to the mpi topology and used by the
+  !!    particle solver
+  !!    @author Jean-Baptiste Lagaert
+  subroutine discretisation_create(resolution,domainLength)
 
     ! Input/Output
-    integer, intent(in)     :: Nx, Ny, Nz
-    real(WP), intent(in)    ::Lx, Ly, Lz
+    integer, dimension(3),intent(in)     :: resolution
+    real(WP), dimension(3),intent(in)    :: domainLength
 
     ! A cubic geometry : unitary lengh and 100 mesh points in each direction.
-    N(1) = Nx
-    N(2) = Ny
-    N(3) = Nz
 
-    length(1)= Lx
-    length(2)= Ly
-    length(3)= Lz
+    N = resolution
+    length = domainLength
 
     N_proc = N / nb_proc_dim
     begin_proc = 1
@@ -312,16 +303,14 @@ subroutine discretisation_create(Nx, Ny, Nz, Lx, Ly, Lz)
     mesh_init = .false.
     call discretisation_init()
 
-end subroutine discretisation_create
+  end subroutine discretisation_create
 
-!> Defaut mesh setup
-!!    @author Jean-Baptiste Lagaert
-!! @details
-!!    Initialise the mesh data associated to the mpi topology and used by the
-!!    particle solver to a default 100x100x100 mesh grid.
-subroutine discretisation_default()
-
-    implicit none
+  !> Default mesh setup
+  !!    @author Jean-Baptiste Lagaert
+  !! @details
+  !!    Initialise the mesh data associated to the mpi topology and used by the
+  !!    particle solver to a default 100x100x100 mesh grid.
+  subroutine discretisation_default()
 
     ! A cubic geometry : unitary lengh and 100 mesh points in each direction.
     N = default_size
@@ -334,22 +323,20 @@ subroutine discretisation_default()
     call set_group_size_init()
     mesh_init = .false.
     call discretisation_init()
-    
-end subroutine discretisation_default
-
-!> To initialize some hidden mesh parameters
-!!    @author Jean-Baptiste Lagaert
-!! @details
-!!        In order to deal well with the mpi topology, the data structure and the
-!!    mesh cut, some other parameters have to be initialised. Some are parameters
-!!    that could not be choose by the user (eg the space step which depend of the
-!!    domain size and the number of mesh) and some other are "hidden" parameter used
-!!    to avoid communication error or to allowed some optimization. For example, it
-!!    include variable used to create unique tag for the different mpi communication,
-!!    to gather line in group and to index these group.
-subroutine discretisation_init()
-
-    implicit none
+
+  end subroutine discretisation_default
+
+  !> To initialize some hidden mesh parameters
+  !!    @author Jean-Baptiste Lagaert
+  !! @details
+  !!        In order to deal well with the mpi topology, the data structure and the
+  !!    mesh cut, some other parameters have to be initialised. Some are parameters
+  !!    that could not be choose by the user (eg the space step which depend of the
+  !!    domain size and the number of mesh) and some other are "hidden" parameter used
+  !!    to avoid communication error or to allowed some optimization. For example, it
+  !!    include variable used to create unique tag for the different mpi communication,
+  !!    to gather line in group and to index these group.
+  subroutine discretisation_init()
 
     integer                 :: direction    ! direction (along X = 1, along Y = 2, along Z = 3)
     integer                 :: group_dir    ! direction "bis"
@@ -367,71 +354,71 @@ subroutine discretisation_init()
     ! Group of line along X
     N_group(3,1) = N_proc(1)/group_size(3,1)
     N_group(3,2) = N_proc(2)/group_size(3,2)
-! But not everything is done by groups !!
-! Group of line along X
-N_group(1,1) = N_proc(2)
-N_group(1,2) = N_proc(3)
-! Group of line along X
-N_group(2,1) = N_proc(1)
-N_group(2,2) = N_proc(3)
-! Group of line along X
-N_group(3,1) = N_proc(1)
-N_group(3,2) = N_proc(2)
-    
+    ! But not everything is done by groups !!
+    ! Group of line along X
+    N_group(1,1) = N_proc(2)
+    N_group(1,2) = N_proc(3)
+    ! Group of line along Y
+    N_group(2,1) = N_proc(1)
+    N_group(2,2) = N_proc(3)
+    ! Group of line along Z
+    N_group(3,1) = N_proc(1)
+    N_group(3,2) = N_proc(2)
+
     ! tag_size = smallest power of ten to ensure tag_size > max ind_group
     do direction = 1,3
-        tag_size(direction,:) = 1
-        do group_dir = 1,2
-            do while (N_group(direction, group_dir)/(10**tag_size(direction, group_dir))>1)
-                tag_size(direction, group_dir) = tag_size(direction, group_dir)+1 
-            end do
-        end do
+       tag_size(direction,:) = 1
+       do group_dir = 1,2
+          do while (N_group(direction, group_dir)/(10**tag_size(direction, group_dir))>1)
+             tag_size(direction, group_dir) = tag_size(direction, group_dir)+1 
+          end do
+       end do
     end do
 
     tag_rank = 1
     do while(3*max(nb_proc_dim(1),nb_proc_dim(2),nb_proc_dim(3))/(10**tag_rank)>=1)
-        tag_rank = tag_rank+1
+       tag_rank = tag_rank+1
     end do
     if (tag_rank == 1) tag_rank = 2
 
     ! Print some information about mesh used
     if(cart_rank==0) then
-        write(*,'(a)') ''
-        if(mesh_init) then 
-            write(*,'(6x,a,a24,a)') 'XXXXXX','group sized changed ','XXXXXX'
-        else
-            write(*,'(6x,a,a30,a)') '-- ','mesh size',' --'
-            write(*,'(6x,a,3(x,i0))') 'global size =',N
-            write(*,'(6x,a,3(x,i0))') 'local size =',N_proc
-        end if
-        write(*,'(6x,a,2(x,i0))') 'group size along X =',group_size(1,:)
-        write(*,'(6x,a,2(x,i0))') 'group size along Y =',group_size(2,:)
-        write(*,'(6x,a,2(x,i0))') 'group size along Z =',group_size(3,:)
-        write(*,'(6x,a)') '-- initialisation: tag generation --'
-        do direction = 1,3
-            write(*,'(6x,a,i0,a,i0,x,i0)') 'tag_size(',direction,',:) = ', tag_size(direction,:)
-        end do
-        write(*,'(6x,a,i0)') 'tag_rank = ', tag_rank
-        write(*,'(6x,a)') '------------------------------------'
-        write(*,'(a)') ''
+       write(*,'(a)') ''
+       if(mesh_init) then 
+          write(*,'(6x,a,a24,a)') 'XXXXXX','group sized changed ','XXXXXX'
+       else
+          write(*,'(6x,a,a30,a)') '-- ','mesh size',' --'
+          write(*,'(6x,a,3(x,i0))') 'global size =',N
+          write(*,'(6x,a,3(x,i0))') 'local size =',N_proc
+       end if
+       write(*,'(6x,a,2(x,i0))') 'group size along X =',group_size(1,:)
+       write(*,'(6x,a,2(x,i0))') 'group size along Y =',group_size(2,:)
+       write(*,'(6x,a,2(x,i0))') 'group size along Z =',group_size(3,:)
+       write(*,'(6x,a)') '-- initialisation: tag generation --'
+       do direction = 1,3
+          write(*,'(6x,a,i0,a,i0,x,i0)') 'tag_size(',direction,',:) = ', tag_size(direction,:)
+       end do
+       write(*,'(6x,a,i0)') 'tag_rank = ', tag_rank
+       write(*,'(6x,a)') '------------------------------------'
+       write(*,'(a)') ''
     end if
 
     mesh_init = .true.
 
-end subroutine discretisation_init
-
-!> Compute unique tag for mpi message by concatenation of position (ie line coordinate), proc_gap and unique Id
-!!    @param[in]    ind_group   = indice of current group of line
-!!    @param[in]    tag_param   = couple of int unique for each message (used to create the tag)
-!!    @param[in]    direction   = current direction
-!!    @param[in]    proc_gap    = number of processus between the sender and the receiver
-!!    @return       tag         = unique tag: at each message send during an iteration have a different tag
-!!@details 
-!!     Use this procedure to compute tag in order to communicate with a distant processus or/and when
-!!    you will send more then two message. It produce longer tag compute_tag_NP because rather tyo use 0/1 it 
-!!    put the gap between the sender and the receiver (ie the number of processus between them) in the tag.
-!!    Using these two procedure allow to obtain more unique tag for communication.
-function compute_tag_gap(ind_group, tag_param, direction,proc_gap) result(tag)
+  end subroutine discretisation_init
+
+  !> Compute unique tag for mpi message by concatenation of position (ie line coordinate), proc_gap and unique Id
+  !!    @param[in]    ind_group   = indice of current group of line
+  !!    @param[in]    tag_param   = couple of int unique for each message (used to create the tag)
+  !!    @param[in]    direction   = current direction
+  !!    @param[in]    proc_gap    = number of processus between the sender and the receiver
+  !!    @return       tag         = unique tag: at each message send during an iteration have a different tag
+  !!@details 
+  !!     Use this procedure to compute tag in order to communicate with a distant processus or/and when
+  !!    you will send more than two messages. It produces longer tags than compute_tag_NP because rather than use 0/1 it 
+  !!    put the gap between the sender and the receiver (ie the number of processus between them) in the tag.
+  !!    Using these two procedure allow to obtain more unique tag for communication.
+  function compute_tag_gap(ind_group, tag_param, direction,proc_gap) result(tag)
 
     ! Returned variable
     integer                             :: tag
@@ -446,9 +433,9 @@ function compute_tag_gap(ind_group, tag_param, direction,proc_gap) result(tag)
     abs_proc_gap = max(abs(proc_gap),1)
     tag = (tag_param(1)*10+direction)*(10**(tag_rank+1))
     if (proc_gap>=0) then
-        tag = tag + proc_gap*10 
+       tag = tag + proc_gap*10 
     else
-        tag = tag - proc_gap*10 +1
+       tag = tag - proc_gap*10 +1
     end if
     tag = (tag*(10**tag_size(direction,1)))+(ind_group(1)-1)
     tag = ((tag*(10**tag_size(direction,2)))+(ind_group(2)-1))
@@ -457,51 +444,51 @@ function compute_tag_gap(ind_group, tag_param, direction,proc_gap) result(tag)
     ! As tag can not be to big (it must be a legal integer and smaller than
     ! maximum mpi tag)
     if ((tag<0).or.(tag>MPI_TAG_UB))  then
-        !print*, 'tag too big - regenerated'
-        tag = (tag_param(1))*(10**(tag_rank+1))
-        if (proc_gap>=0) then
-            tag = tag + proc_gap*10 
-        else
-            tag = tag - proc_gap*10 +1
-        end if
-        tag = tag*(10**tag_size(direction,1))+(ind_group(1)-1)
-        tag = ((tag*(10**tag_size(direction,2)))+(ind_group(2)-1))
-        tag = (tag*10)+tag_param(2)
-        if ((tag<0).or.(tag>MPI_TAG_UB))  then
-            !print*, 'tag very too big - regenerated'
-            tag = (tag_param(1))*(10**(tag_rank+1))
-            if (proc_gap>=0) then
-                tag = tag + proc_gap*10 
-            else
-                tag = tag - proc_gap*10 +1
-            end if
-            tag = (tag*10)+tag_param(2)
-            if ((tag<0).or.(tag>MPI_TAG_UB))  then
-                tag = tag_param(1)*10 + tag_param(2)
-                if (proc_gap<0) tag = tag +100
-                !print*, 'rank = ', cart_rank, ' coord = ', coord
-                !print*, 'ind_group = ', ind_group, ' ; tag_param = ', tag_param
-                !print*, 'direction = ', direction, ' gap = ', proc_gap ,' and tag = ', tag
-            end if
-        end if
+       !print*, 'tag too big - regenerated'
+       tag = (tag_param(1))*(10**(tag_rank+1))
+       if (proc_gap>=0) then
+          tag = tag + proc_gap*10 
+       else
+          tag = tag - proc_gap*10 +1
+       end if
+       tag = tag*(10**tag_size(direction,1))+(ind_group(1)-1)
+       tag = ((tag*(10**tag_size(direction,2)))+(ind_group(2)-1))
+       tag = (tag*10)+tag_param(2)
+       if ((tag<0).or.(tag>MPI_TAG_UB))  then
+          !print*, 'tag very too big - regenerated'
+          tag = (tag_param(1))*(10**(tag_rank+1))
+          if (proc_gap>=0) then
+             tag = tag + proc_gap*10 
+          else
+             tag = tag - proc_gap*10 +1
+          end if
+          tag = (tag*10)+tag_param(2)
+          if ((tag<0).or.(tag>MPI_TAG_UB))  then
+             tag = tag_param(1)*10 + tag_param(2)
+             if (proc_gap<0) tag = tag +100
+             !print*, 'rank = ', cart_rank, ' coord = ', coord
+             !print*, 'ind_group = ', ind_group, ' ; tag_param = ', tag_param
+             !print*, 'direction = ', direction, ' gap = ', proc_gap ,' and tag = ', tag
+          end if
+       end if
     end if
-! XXX Fin aide au debug XXX
+    ! XXX Fin aide au debug XXX
 
-end function compute_tag_gap
+  end function compute_tag_gap
 
 
-!> Compute unique tag for mpi message by concatenation of position(ie line coordinate), +1 or -1 and unique Id
-!!    @param[in]    ind_group   = indice of current group of line
-!!    @param[in]    tag_param   = couple of int unique for each message (used to create the tag)
-!!    @param[in]    direction   = current direction
-!!    @return       tag_table   = unique couple tag: use tag_table(1) for mesage to previous proc. (or first 
-!!                                message ) and tag_table(2) for the other message.
-!!@details 
-!!     Use this procedure to compute tag for communication with your neighbor or when only two message are send:
-!!    it produce smaller tag then compute_tag_gap because the gap between sender and receiver are replaced by 1,
-!!    for communicate with previous processus (or first of the two message), or 0, for communication with next 
-!!    processus (or the second message). It allow to reuse some unique Id.
-function compute_tag_NP(ind_group, tag_param, direction) result (tag_table)
+  !> Compute unique tag for mpi message by concatenation of position(ie line coordinate), +1 or -1 and unique Id
+  !!    @param[in]    ind_group   = indice of current group of line
+  !!    @param[in]    tag_param   = couple of int unique for each message (used to create the tag)
+  !!    @param[in]    direction   = current direction
+  !!    @return       tag_table   = unique couple tag: use tag_table(1) for message to previous proc. (or first 
+  !!                                message ) and tag_table(2) for the other message.
+  !!@details 
+  !!     Use this procedure to compute tag for communication with your neighbor or when only two message are send:
+  !!    it produce smaller tag then compute_tag_gap because the gap between sender and receiver are replaced by 1,
+  !!    for communicate with previous processus (or first of the two message), or 0, for communication with next 
+  !!    processus (or the second message). It allow to reuse some unique Id.
+  function compute_tag_NP(ind_group, tag_param, direction) result (tag_table)
 
     ! Returned variable
     integer, dimension(2)               :: tag_table
@@ -512,9 +499,9 @@ function compute_tag_NP(ind_group, tag_param, direction) result (tag_table)
 
     tag_table(2) = (tag_param(1)*10+direction)*10
     tag_table(1) = tag_table(2)
-    
+
     tag_table(2) = tag_table(2) +1
-   
+
     tag_table(2) = (tag_table(2)*(10**tag_size(direction,1)))+(ind_group(1)-1)
     tag_table(1) = (tag_table(1)*(10**tag_size(direction,1)))+(ind_group(1)-1)
 
@@ -526,111 +513,107 @@ function compute_tag_NP(ind_group, tag_param, direction) result (tag_table)
 
     ! Check if tag limitations are respected.
     if ((minval(tag_table)<0).or.(maxval(tag_table)>MPI_TAG_UB))  then
-        tag_table = tag_param(1)*100 + tag_param(2)*10
-        tag_table = tag_table + (/1,2/)
-        !print*, 'rank = ', cart_rank, ' coord = ', coord
-        !print*, 'ind_group = ', ind_group, ' ; tag_param = ', tag_param
-        !print*, 'direction = ', direction, ' and tag = ', tag_table
+       tag_table = tag_param(1)*100 + tag_param(2)*10
+       tag_table = tag_table + (/1,2/)
+       !print*, 'rank = ', cart_rank, ' coord = ', coord
+       !print*, 'ind_group = ', ind_group, ' ; tag_param = ', tag_param
+       !print*, 'direction = ', direction, ' and tag = ', tag_table
     end if
 
 
-end function compute_tag_NP
+  end function compute_tag_NP
 
 
-!> Adjust the private variable "group_size": line are gathering on group of same
-!! size undependant from the direction
-!!    @param[in]    s       =  integer such as group will gather sxs lines
-!!    @param[in]    init    =  logical to said if it is a default init of group_size
-!! @details
-!!    Create group of line s x s along the three direction.
-subroutine set_group_size_1(s, init)
+  !> Adjust the private variable "group_size": line are gathering on group of same
+  !! size undependant from the direction
+  !!    @param[in]    s       =  integer such as group will gather sxs lines
+  !!    @param[in]    init    =  logical to say whether it is a default init of group_size
+  !! @details
+  !!    Create group of line s x s along the three direction.
+  subroutine set_group_size_1(s, init)
 
-    implicit none
     integer, intent(in)             :: s
     logical, intent(in), optional   :: init
 
     if (.not.mesh_init) then
-        group_size = s
-        ! And now group size is initialized !
-        group_init = .true.
+       group_size = s
+       ! And now group size is initialized !
+       group_init = .true.
     else
-        if (all(mod(N_proc,s)==0)) group_size = s
-        call discretisation_init()
+       if (all(mod(N_proc,s)==0)) group_size = s
+       call discretisation_init()
     end if
 
-    if (present(init)) call set_group_size(init)
+    if (present(init)) call set_group_size_init(init)
 
-end subroutine set_group_size_1
+  end subroutine set_group_size_1
 
 
-!> Adjust the private variable "group_size": line are gathering on group of same
-!! size undependant from the direction
-!!    @param[in]    s1      =  integer such as group will gather s1 line along first remaining direction
-!!    @param[in]    s2      =  integer such as group will gather s1 line along second remaining direction
-!!    @param[in]    init    =  logical to said if it is a default init of group_size
-!! @details
-!!    Created group will gather s1 x s2 lines
-subroutine set_group_size_1x2(s1, s2, init)
+  !> Adjust the private variable "group_size": line are gathering on group of same
+  !! size undependant from the direction
+  !!    @param[in]    s1      =  integer such as group will gather s1 line along first remaining direction
+  !!    @param[in]    s2      =  integer such as group will gather s2 lines along second remaining direction
+  !!    @param[in]    init    =  logical to said if it is a default init of group_size
+  !! @details
+  !!    Created group will gather s1 x s2 lines
+  subroutine set_group_size_1x2(s1, s2, init)
 
-    implicit none
     integer, intent(in)             :: s1, s2
     logical, intent(in), optional   :: init
 
     if (.not. mesh_init) then
-        group_size(:,1) = s1
-        group_size(:,2) = s2
-        ! And now group size is initialized !
-        group_init = .true.
+       group_size(:,1) = s1
+       group_size(:,2) = s2
+       ! And now group size is initialized !
+       group_init = .true.
     else
-        if (all(mod(N_proc,s1)==0)) group_size(:,1) = s1
-        if (all(mod(N_proc,s2)==0)) group_size(:,2) = s2
-        call discretisation_init()
+       if (all(mod(N_proc,s1)==0)) group_size(:,1) = s1
+       if (all(mod(N_proc,s2)==0)) group_size(:,2) = s2
+       call discretisation_init()
     end if
 
-    if (present(init)) call set_group_size(init)
+    if (present(init)) call set_group_size_init(init)
 
-end subroutine set_group_size_1x2
+  end subroutine set_group_size_1x2
 
 
-!> Adjust the private variable "group_size": line are gathering on group of a
-!! size depending of the current direction.
-!!    @param[in]    sX =  integer such as group of lines along X will gather sX x sX lines
-!!    @param[in]    sY =  integer such as group of lines along Y will gather sY x sY lines
-!!    @param[in]    sZ =  integer such as group of lines along Z will gather sZ x sX lines
-!!    @param[in]    init    =  logical to said if it is a default init of group_size
-subroutine set_group_size_3(sX, sY, sZ, init)
+  !> Adjust the private variable "group_size": line are gathering on group of a
+  !! size depending of the current direction.
+  !!    @param[in]    sX =  integer such as group of lines along X will gather sX x sX lines
+  !!    @param[in]    sY =  integer such as group of lines along Y will gather sY x sY lines
+  !!    @param[in]    sZ =  integer such as group of lines along Z will gather sZ x sZ lines
+  !!    @param[in]    init    =  logical to said if it is a default init of group_size
+  subroutine set_group_size_3(sX, sY, sZ, init)
 
-    implicit none
     integer, intent(in)             :: sX, sY, sZ
     logical, intent(in), optional   :: init
 
     if (.not.mesh_init) then
-        group_size(1,:) = sX
-        group_size(2,:) = sY
-        group_size(3,:) = sZ
-        ! And now group size is initialized !
-        group_init = .true.
+       group_size(1,:) = sX
+       group_size(2,:) = sY
+       group_size(3,:) = sZ
+       ! And now group size is initialized !
+       group_init = .true.
     else
-        if (all(mod(N_proc(2:3),sX)==0)) group_size(1,:) = sX
-        if ((mod(N_proc(1),sY)==0).and.(mod(N_proc(3),sY)==0)) group_size(2,:) = sY
-        if ((mod(N_proc(1),sZ)==0).and.(mod(N_proc(2),sZ)==0)) group_size(3,:) = sZ
-        call discretisation_init()
+       if (all(mod(N_proc(2:3),sX)==0)) group_size(1,:) = sX
+       if ((mod(N_proc(1),sY)==0).and.(mod(N_proc(3),sY)==0)) group_size(2,:) = sY
+       if ((mod(N_proc(1),sZ)==0).and.(mod(N_proc(2),sZ)==0)) group_size(3,:) = sZ
+       call discretisation_init()
     end if
 
-    if (present(init)) call set_group_size(init)
+    if (present(init)) call set_group_size_init(init)
 
 
-end subroutine set_group_size_3
+  end subroutine set_group_size_3
 
-!> Adjust the private variable "group_size": line are gathering on group of same
-!! size undependant from the direction
-!!    @param[in]    init    =  logical to said if it is a default init of group_size
-!! @details
-!!    Create group of acceptable default size (or re-init group size if optional
-!! argument "init" is present and set to true).
-subroutine set_group_size_init(init)
+  !> Adjust the private variable "group_size": line are gathering on group of same
+  !! size undependant from the direction
+  !!    @param[in]    init    =  logical to said if it is a default init of group_size
+  !! @details
+  !!    Create group of acceptable default size (or re-init group size if optional
+  !! argument "init" is present and set to true).
+  subroutine set_group_size_init(init)
 
-    implicit none
     logical, intent(in), optional   :: init
 
     ! To check if group size is well defined
@@ -639,37 +622,37 @@ subroutine set_group_size_init(init)
     if (present(init)) group_init = init
 
     if (.not.group_init) then
-        ! Setup the size of line group to a default value
-        if (all(mod(N_proc,5)==0)) then
-            group_size = 5
-        else if (all(mod(N_proc,4)==0)) then
-            group_size = 4
-        else if (all(mod(N_proc,2)==0)) then
-            group_size = 2
-        else
-            group_size = 1
-        end if
-        ! And now group size is initialized !
-        group_init = .true.
+       ! Setup the size of line group to a default value
+       if (all(mod(N_proc,5)==0)) then
+          group_size = 5
+       else if (all(mod(N_proc,4)==0)) then
+          group_size = 4
+       else if (all(mod(N_proc,2)==0)) then
+          group_size = 2
+       else
+          group_size = 1
+       end if
+       ! And now group size is initialized !
+       group_init = .true.
     else
-        domain_size(1,:) = (/N_proc(2), N_proc(3)/)
-        domain_size(2,:) = (/N_proc(1), N_proc(3)/)
-        domain_size(3,:) = (/N_proc(1), N_proc(2)/)
-
-        where (mod(domain_size,group_size)/=0)
-            where(mod(domain_size,5)==0)
-                group_size=5
-            elsewhere(mod(domain_size,4)==0)
-                group_size=4
-            elsewhere(mod(domain_size,2)==0)
-                group_size=2
-            elsewhere
-                group_size=1
-            end where
-        end where
+       domain_size(1,:) = (/N_proc(2), N_proc(3)/)
+       domain_size(2,:) = (/N_proc(1), N_proc(3)/)
+       domain_size(3,:) = (/N_proc(1), N_proc(2)/)
+
+       where (mod(domain_size,group_size)/=0)
+          where(mod(domain_size,5)==0)
+             group_size=5
+          elsewhere(mod(domain_size,4)==0)
+             group_size=4
+          elsewhere(mod(domain_size,2)==0)
+             group_size=2
+          elsewhere
+             group_size=1
+          end where
+       end where
     end if
 
-end subroutine set_group_size_init
+  end subroutine set_group_size_init
 
 end module cart_topology
 !> @}
diff --git a/HySoP/src/Unstable/LEGI/src/particles/advec.f90 b/HySoP/src/Unstable/LEGI/src/particles/advec.f90
index f963bdd2a..6fa7e0341 100644
--- a/HySoP/src/Unstable/LEGI/src/particles/advec.f90
+++ b/HySoP/src/Unstable/LEGI/src/particles/advec.f90
@@ -22,47 +22,47 @@
 !------------------------------------------------------------------------------
 module advec
 
-    use string
-    use advecX
-    use advecY
-    use advecZ
+  use string
+  use advecX
+  use advecY
+  use advecZ
 
-    implicit none
+  implicit none
 
-    ! ===== Variables =====
-    !> numerical method use to advect the scalar
-    character(len=str_short), private   :: type_part_solv   
-    !> dimensionnal splitting (eg classical, Strang or particle)
-    character(len=str_short), private   :: dim_splitting    
+  ! ===== Variables =====
+  !> numerical method use to advect the scalar
+  character(len=str_short), private   :: type_part_solv   
+  !> dimensionnal splitting (eg classical, Strang or particle)
+  character(len=str_short), private   :: dim_splitting    
 
 
-    ! ===== Public procedures =====
-    ! Scheme used to advec the scalar (order 2 or 4 ?)
-    public                              :: type_part_solver
+  ! ===== Public procedures =====
+  ! Scheme used to advec the scalar (order 2 or 4 ?)
+  public                              :: type_part_solver
 
-    ! Advection methods
-    public                              :: advec_init   ! initialize the scalar solver
-    public                              :: advec_step   ! advec the scalar field during a time step.
+  ! Advection methods
+  public                              :: advec_init   ! initialize the scalar solver
+  public                              :: advec_step   ! advec the scalar field during a time step.
 
 contains
 
-! ===== Public methods =====
+  ! ===== Public methods =====
 
-!> Return the name of the particle method used for the advection
-!!    @return type_part_solver      = numerical method used for advection
-function type_part_solver()
+  !> Return the name of the particle method used for the advection
+  !!    @return type_part_solver      = numerical method used for advection
+  function type_part_solver()
     character(len=str_short)    :: type_part_solver
-    
+
     type_part_solver = type_part_solv
-end function
-
-!> Initialise the particle advection methods
-!!    @param[in]    order       = to choose the remeshing method (and thus the order)
-!!    @param[out]   stab_coeff  = stability coefficient (condition stability is
-!!                                  dt< stab_coeff/norm_inf(V))
-!!    @param[in]    verbosity   = to display info about chosen remeshing formula (optional)
-subroutine advec_init(order, stab_coeff, verbosity)
-    
+  end function type_part_solver
+
+  !> Initialise the particle advection methods
+  !!    @param[in]    order       = to choose the remeshing method (and thus the order)
+  !!    @param[out]   stab_coeff  = stability coefficient (condition stability is
+  !!                                  dt< stab_coeff/norm_inf(V))
+  !!    @param[in]    verbosity   = to display info about chosen remeshing formula (optional)
+  subroutine advec_init(order, stab_coeff, verbosity)
+
     ! Input/Output
     character(len=*), optional, intent(in)  ::  order
     logical, optional, intent(in)           ::  verbosity
@@ -70,16 +70,16 @@ subroutine advec_init(order, stab_coeff, verbosity)
 
     ! Use default solver if it is not chosen by the user.
     if(present(order)) then
-        type_part_solv = order
+       type_part_solv = order
     else 
-        type_part_solv = 'p_O2'
+       type_part_solv = 'p_O2'
     end if
 
     ! Initialize the solver
     if (present(verbosity)) then
-        call AC_solver_init(type_part_solv, verbosity)
+       call AC_solver_init(type_part_solv, verbosity)
     else
-        call AC_solver_init(type_part_solv)
+       call AC_solver_init(type_part_solv)
     end if
 
     ! Choosing the dimensionnal splitting to use
@@ -89,18 +89,18 @@ subroutine advec_init(order, stab_coeff, verbosity)
     ! Compute stability coefficient
     if (present(stab_coeff)) stab_coeff = 1.0/(2.0*dble(bl_size))
 
-end subroutine advec_init
+  end subroutine advec_init
 
 
-!> advec_scheme
-!!    @param[in]        dt          = time step
-!!    @param[in]        Vx          = velocity along x (could be discretised on a bigger mesh then the scalar)
-!!    @param[in]        Vy          = velocity along y 
-!!    @param[in]        Vz          = velocity along z
-!!    @param[in,out]    scal        = scalar field to advect
-!!    @param[in]        dim_split   = dimensionnal splitting (eg classical,
-!!                                        Strang splitting or particle splitting)
-subroutine advec_step(dt, Vx, Vy, Vz, scal, dim_split)
+  !> advec_scheme
+  !!    @param[in]        dt          = time step
+  !!    @param[in]        Vx          = velocity along x (could be discretised on a bigger mesh then the scalar)
+  !!    @param[in]        Vy          = velocity along y 
+  !!    @param[in]        Vz          = velocity along z
+  !!    @param[in,out]    scal        = scalar field to advect
+  !!    @param[in]        dim_split   = dimensionnal splitting (eg classical,
+  !!                                        Strang splitting or particle splitting)
+  subroutine advec_step(dt, Vx, Vy, Vz, scal, dim_split)
 
     ! Input/Output
     real(WP), intent(in)                                                :: dt
@@ -112,40 +112,40 @@ subroutine advec_step(dt, Vx, Vy, Vz, scal, dim_split)
 
     ! Default dimensionnal splitting if the user do not choose it
     if(present(dim_split)) then
-        splitting = dim_split
+       splitting = dim_split
     else 
-        splitting = dim_splitting
+       splitting = dim_splitting
     end if
 
     ! Scheme used for advection : particle method (order 2 or 4) or spectral one.
     if (type_solv=='spectral') then
-        print*, 'Solveur non implémenté'
+       print*, 'Solveur non implémenté'
     end if
-        
+
     select case(splitting)
-        case('classic')
-            call advecX_calc(dt, Vx, scal, type_part_solv)
-            call advecY_calc(dt, Vy, scal, type_part_solv)
-            call advecZ_calc(dt, Vz, scal, type_part_solv)
-        case('Strang')
-            call advecX_calc(dt/2.0, Vx, scal, type_part_solv)
-            call advecY_calc(dt/2.0, Vy, scal, type_part_solv)
-            call advecZ_calc(dt/2.0, Vz, scal, type_part_solv)
-            call advecZ_calc(dt/2.0, Vz, scal, type_part_solv)
-            call advecY_calc(dt/2.0, Vy, scal, type_part_solv)
-            call advecX_calc(dt/2.0, Vx, scal, type_part_solv)
-        case default
-            call advecX_calc(dt/2.0, Vx, scal, type_part_solv)
-            call advecY_calc(dt/2.0, Vy, scal, type_part_solv)
-            call advecZ_calc(dt/2.0, Vz, scal, type_part_solv)
-            call advecZ_calc(dt/2.0, Vz, scal, type_part_solv)
-            call advecY_calc(dt/2.0, Vy, scal, type_part_solv)
-            call advecX_calc(dt/2.0, Vx, scal, type_part_solv)
+    case('classic')
+       call advecX_calc(dt, Vx, scal, type_part_solv)
+       call advecY_calc(dt, Vy, scal, type_part_solv)
+       call advecZ_calc(dt, Vz, scal, type_part_solv)
+    case('Strang')
+       call advecX_calc(dt/2.0, Vx, scal, type_part_solv)
+       call advecY_calc(dt/2.0, Vy, scal, type_part_solv)
+       call advecZ_calc(dt/2.0, Vz, scal, type_part_solv)
+       call advecZ_calc(dt/2.0, Vz, scal, type_part_solv)
+       call advecY_calc(dt/2.0, Vy, scal, type_part_solv)
+       call advecX_calc(dt/2.0, Vx, scal, type_part_solv)
+    case default
+       call advecX_calc(dt/2.0, Vx, scal, type_part_solv)
+       call advecY_calc(dt/2.0, Vy, scal, type_part_solv)
+       call advecZ_calc(dt/2.0, Vz, scal, type_part_solv)
+       call advecZ_calc(dt/2.0, Vz, scal, type_part_solv)
+       call advecY_calc(dt/2.0, Vy, scal, type_part_solv)
+       call advecX_calc(dt/2.0, Vx, scal, type_part_solv)
     end select
 
-end subroutine advec_step
-    
+  end subroutine advec_step
+
 
-!> ===== Private procedure =====
+  !> ===== Private procedure =====
 end module advec
 !> @}
diff --git a/HySoP/src/Unstable/LEGI/src/particles/advecX.f90 b/HySoP/src/Unstable/LEGI/src/particles/advecX.f90
index 2255d78b9..9636852d9 100644
--- a/HySoP/src/Unstable/LEGI/src/particles/advecX.f90
+++ b/HySoP/src/Unstable/LEGI/src/particles/advecX.f90
@@ -29,59 +29,59 @@
 
 module advecX
 
-    use precision
-    use advec_common
-
-    implicit none
-
-    ! ===== Public procedures =====
-    !> Generique procedure to advect the scalar with a particles solver
-    public                  :: advecX_calc
-    !----- (corrected) Remeshing method (these methods are set to public in validation purposes) -----
-    public                  :: Xremesh_O2       ! order 2
-    public                  :: Xremesh_O4       ! order 4
-    ! ----- Other remeshing formula -----
-    public                  :: Xremesh_Mprime6  
-
-    ! ===== Private porcedures =====
-    !> particles solver with remeshing method at order 2
-    private                 :: advecX_calc_O2       ! remeshing method at order 2
-    private                 :: advecX_calc_O2_group ! remeshing method at order 2
-    private                 :: advecX_calc_Mprime6  ! M'6 remeshing method
-    ! Particles initialisation
-    private                 :: advecX_init      ! generic initialization of particle position, velocity.
-    private                 :: advecX_init_line ! initialisation for only one line of particles
-    private                 :: advecX_init_group! initialisation for a group of line of particles
-
-    ! ===== Private variable ====
-    ! particles solver with different remeshing formula
-    integer, dimension(2), private  :: gpX_size     
-    !> Current direction = along X
-    integer, private, parameter     :: direction=1          
-
-    interface advecX_init
-        module procedure advecX_init_line, advecX_init_group
-    end interface advecX_init
+  use precision
+  use advec_remeshing_formula
+
+  implicit none
+
+  ! ===== Public procedures =====
+  !> Generic procedure to advect the scalar with a particles solver
+  public                  :: advecX_calc
+  !----- (corrected) Remeshing method (these methods are set to public for validation purposes) -----
+  public                  :: Xremesh_O2       ! order 2
+  public                  :: Xremesh_O4       ! order 4
+  ! ----- Other remeshing formula -----
+  public                  :: Xremesh_Mprime6  
+
+  ! ===== Private procedures =====
+  !> particles solver with remeshing method at order 2
+  private                 :: advecX_calc_O2       ! remeshing method at order 2
+  private                 :: advecX_calc_O2_group ! remeshing method at order 2
+  private                 :: advecX_calc_Mprime6  ! M'6 remeshing method
+  ! Particles initialisation
+  private                 :: advecX_init      ! generic initialization of particle position, velocity.
+  private                 :: advecX_init_line ! initialisation for only one line of particles
+  private                 :: advecX_init_group! initialisation for a group of line of particles
+
+  ! ===== Private variable ====
+  ! particles solver with different remeshing formula
+  integer, dimension(2), private  :: gpX_size     
+  !> Current direction = along X
+  integer, private, parameter     :: direction=1          
+
+  interface advecX_init
+     module procedure advecX_init_line, advecX_init_group
+  end interface advecX_init
 
 contains
 
-! #####################################################################################
-! #####                                                                           #####
-! #####                         Public procedure                                  #####
-! #####                                                                           #####
-! #####################################################################################
-
-! ====================================================================
-! ====================    Generic solveur         ====================
-! ====================================================================
-
-!> Scalar advection (this procedure call the right solver, depending on the simulation setup) 
-!!    @param[in]        dt          = time step
-!!    @param[in]        Vx          = velocity along y (could be discretised on a bigger mesh then the scalar)
-!!    @param[in,out]    SC          = scalar field to advect
-!!    @param[in]        type_solver = scheme use for the advection (particle with order 2 or 4)
-subroutine advecX_calc(dt, Vx, SC, type_solver)
-  
+  ! #####################################################################################
+  ! #####                                                                           #####
+  ! #####                         Public procedure                                  #####
+  ! #####                                                                           #####
+  ! #####################################################################################
+
+  ! ====================================================================
+  ! ====================    Generic solver          ====================
+  ! ====================================================================
+
+  !> Scalar advection (this procedure calls the right solver, depending on the simulation setup) 
+  !!    @param[in]        dt          = time step
+  !!    @param[in]        Vx          = velocity along x (could be discretised on a bigger mesh than the scalar)
+  !!    @param[in,out]    SC          = scalar field to advect
+  !!    @param[in]        type_solver = scheme use for the advection (particle with order 2 or 4)
+  subroutine advecX_calc(dt, Vx, SC, type_solver)
+
     ! Input/Output
     real(WP), intent(in)                                                :: dt
     real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vx
@@ -96,36 +96,36 @@ subroutine advecX_calc(dt, Vx, SC, type_solver)
 
     ! Call the right solver depending on the space order we want to.
     select case(type_solver)
-        case('p_O2')
-            call advecX_calc_O2_group(dt, Vx, SC, group_size(direction,:))
-            !call advecX_calc_O2(dt, Vx, SC)
-        case('p_O4')
-            call advecX_calc_O4(dt, Vx, SC, group_size(direction,:))
-        case('p_M6')
-            call advecX_calc_Mprime6(dt, Vx, SC, group_size(direction,:))
-        case default
-            call advecX_calc_O2_group(dt, Vx, SC, group_size(direction,:))
+    case('p_O2')
+       call advecX_calc_O2_group(dt, Vx, SC, group_size(direction,:))
+       !call advecX_calc_O2(dt, Vx, SC)
+    case('p_O4')
+       call advecX_calc_O4(dt, Vx, SC, group_size(direction,:))
+    case('p_M6')
+       call advecX_calc_Mprime6(dt, Vx, SC, group_size(direction,:))
+    case default
+       call advecX_calc_O2_group(dt, Vx, SC, group_size(direction,:))
     end select
 
 
-end subroutine advecX_calc
+  end subroutine advecX_calc
 
 
-! #####################################################################################
-! #####                                                                           #####
-! #####                         Private procedure                                 #####
-! #####                                                                           #####
-! #####################################################################################
+  ! #####################################################################################
+  ! #####                                                                           #####
+  ! #####                         Private procedure                                 #####
+  ! #####                                                                           #####
+  ! #####################################################################################
 
-! ====================================================================
-! ====================    Different solvers       ====================
-! ====================================================================
+  ! ====================================================================
+  ! ====================    Different solvers       ====================
+  ! ====================================================================
 
-!> Advection during a time step dt - order 2
-!!    @param[in]        dt      = time step
-!!    @param[in]        Vx      = velocity along y (could be discretised on a bigger mesh then the scalar)
-!!    @param[in,out]    scal3D   = scalar field to advect
-subroutine advecX_calc_O2(dt,Vx,scal3D)
+  !> Advection during a time step dt - order 2
+  !!    @param[in]        dt      = time step
+  !!    @param[in]        Vx      = velocity along x (could be discretised on a bigger mesh than the scalar)
+  !!    @param[in,out]    scal3D   = scalar field to advect
+  subroutine advecX_calc_O2(dt,Vx,scal3D)
 
     ! Input/Output
     real(WP), intent(in)                                                :: dt
@@ -142,39 +142,39 @@ subroutine advecX_calc_O2(dt,Vx,scal3D)
 
     ind_group = 0
     do k = 1, N_proc(3)
-        ind_group(2) = ind_group(2) + 1
-        ind_group(1) = 0
-        do j = 1, N_proc(2)
-        ind_group(1) = ind_group(1) + 1
-
-            ! ===== Init particles =====
-            call advecX_init(Vx, j, k, p_pos_adim, p_V)
-
-            ! ===== Advection =====
-            ! -- Compute velocity (with a RK2 scheme) --
-            call AC_particle_velocity(dt, direction, ind_group, p_pos_adim, p_V)
-            ! -- Advec particles --
-            p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
-
-            ! ===== Remeshing =====
-            ! -- Pre-Remeshing: Determine blocks type and tag particles --
-            call AC_type_and_block(dt, direction, ind_group, p_V, &
-                    & bl_type, bl_tag)
-            ! -- Remeshing --
-            call Xremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,j,k,scal3D)
-
-        end do
+       ind_group(2) = ind_group(2) + 1
+       ind_group(1) = 0
+       do j = 1, N_proc(2)
+          ind_group(1) = ind_group(1) + 1
+
+          ! ===== Init particles =====
+          call advecX_init(Vx, j, k, p_pos_adim, p_V)
+
+          ! ===== Advection =====
+          ! -- Compute velocity (with a RK2 scheme) --
+          call AC_particle_velocity(dt, direction, ind_group, p_pos_adim, p_V)
+          ! -- Advec particles --
+          p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+
+          ! ===== Remeshing =====
+          ! -- Pre-Remeshing: Determine blocks type and tag particles --
+          call AC_type_and_block(dt, direction, ind_group, p_V, &
+               & bl_type, bl_tag)
+          ! -- Remeshing --
+          call Xremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,j,k,scal3D)
+
+       end do
     end do
 
-end subroutine advecX_calc_O2
+  end subroutine advecX_calc_O2
 
 
-!> Advection during a time step dt - order 2
-!!    @param[in]        dt      = time step
-!!    @param[in]        Vx      = velocity along y (could be discretised on a bigger mesh then the scalar)
-!!    @param[in,out]    scal3D  = scalar field to advect
-!!    @param[in]        gs      = size of groups (along X direction)
-subroutine advecX_calc_O2_group(dt,Vx,scal3D,gs)
+  !> Advection during a time step dt - order 2
+  !!    @param[in]        dt      = time step
+  !!    @param[in]        Vx      = velocity along x (could be discretised on a bigger mesh than the scalar)
+  !!    @param[in,out]    scal3D  = scalar field to advect
+  !!    @param[in]        gs      = size of groups (along X direction)
+  subroutine advecX_calc_O2_group(dt,Vx,scal3D,gs)
 
     ! Input/Output
     real(WP), intent(in)                                                :: dt
@@ -192,39 +192,39 @@ subroutine advecX_calc_O2_group(dt,Vx,scal3D,gs)
     ind_group = 0
 
     do k = 1, N_proc(3), gs(2)
-        ind_group(2) = ind_group(2) + 1
-        ind_group(1) = 0
-        do j = 1, N_proc(2), gs(1)
-            ind_group(1) = ind_group(1) + 1
+       ind_group(2) = ind_group(2) + 1
+       ind_group(1) = 0
+       do j = 1, N_proc(2), gs(1)
+          ind_group(1) = ind_group(1) + 1
 
-            ! ===== Init particles =====
-            call advecX_init_group(Vx, j, k, gs, p_pos_adim, p_V)
+          ! ===== Init particles =====
+          call advecX_init_group(Vx, j, k, gs, p_pos_adim, p_V)
 
-            ! ===== Advection =====
-            ! -- Compute velocity (with a RK2 scheme) --
-            call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
+          ! ===== Advection =====
+          ! -- Compute velocity (with a RK2 scheme) --
+          call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
 
-            ! -- Advec particles --
-            p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+          ! -- Advec particles --
+          p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
 
-            ! ===== Remeshing =====
-            ! -- Pre-Remeshing: Determine blocks type and tag particles --
-            call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
-            ! -- Remeshing --
-            call Xremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag, j,k,scal3D)
+          ! ===== Remeshing =====
+          ! -- Pre-Remeshing: Determine blocks type and tag particles --
+          call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
+          ! -- Remeshing --
+          call Xremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag, j,k,scal3D)
 
-        end do
+       end do
     end do
 
-end subroutine advecX_calc_O2_group
+  end subroutine advecX_calc_O2_group
 
 
-!> Advection during a time step dt - order 4
-!!    @param[in]        dt      = time step
-!!    @param[in]        Vx      = velocity along y (could be discretised on a bigger mesh then the scalar)
-!!    @param[in,out]    scal3D  = scalar field to advect
-!!    @param[in]        gs      = size of groups (along X direction)
-subroutine advecX_calc_O4(dt,Vx,scal3D,gs)
+  !> Advection during a time step dt - order 4
+  !!    @param[in]        dt      = time step
+  !!    @param[in]        Vx      = velocity along x (could be discretised on a bigger mesh than the scalar)
+  !!    @param[in,out]    scal3D  = scalar field to advect
+  !!    @param[in]        gs      = size of groups (along X direction)
+  subroutine advecX_calc_O4(dt,Vx,scal3D,gs)
 
     ! Input/Output
     real(WP), intent(in)                                                :: dt
@@ -242,39 +242,39 @@ subroutine advecX_calc_O4(dt,Vx,scal3D,gs)
     ind_group = 0
 
     do k = 1, N_proc(3), gs(2)
-        ind_group(2) = ind_group(2) + 1
-        ind_group(1) = 0
-        do j = 1, N_proc(2), gs(1)
-            ind_group(1) = ind_group(1) + 1
-
-            ! ===== Init particles =====
-            call advecX_init_group(Vx, j, k, gs, p_pos_adim, p_V)
-
-            ! ===== Advection =====
-            ! -- Compute velocity (with a RK2 scheme) --
-            call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
-            ! -- Advec particles --
-            p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
-
-            ! ===== Remeshing =====
-            ! -- Pre-Remeshing: Determine blocks type and tag particles --
-            call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
-            ! -- Remeshing --
-            call Xremesh_O4(ind_group, gs, p_pos_adim, bl_type, bl_tag, j,k,scal3D)
-
-        end do
+       ind_group(2) = ind_group(2) + 1
+       ind_group(1) = 0
+       do j = 1, N_proc(2), gs(1)
+          ind_group(1) = ind_group(1) + 1
+
+          ! ===== Init particles =====
+          call advecX_init_group(Vx, j, k, gs, p_pos_adim, p_V)
+
+          ! ===== Advection =====
+          ! -- Compute velocity (with a RK2 scheme) --
+          call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
+          ! -- Advec particles --
+          p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+
+          ! ===== Remeshing =====
+          ! -- Pre-Remeshing: Determine blocks type and tag particles --
+          call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
+          ! -- Remeshing --
+          call Xremesh_O4(ind_group, gs, p_pos_adim, bl_type, bl_tag, j,k,scal3D)
+
+       end do
     end do
 
 
-end subroutine advecX_calc_O4
+  end subroutine advecX_calc_O4
 
 
-!> Advection during a time step dt -  M'6 remeshing formula
-!!    @param[in]        dt      = time step
-!!    @param[in]        Vx      = velocity along y (could be discretised on a bigger mesh then the scalar)
-!!    @param[in,out]    scal3D  = scalar field to advect
-!!    @param[in]        gs      = size of groups (along X direction)
-subroutine advecX_calc_Mprime6(dt,Vx,scal3D,gs)
+  !> Advection during a time step dt -  M'6 remeshing formula
+  !!    @param[in]        dt      = time step
+  !!    @param[in]        Vx      = velocity along x (could be discretised on a bigger mesh than the scalar)
+  !!    @param[in,out]    scal3D  = scalar field to advect
+  !!    @param[in]        gs      = size of groups (along X direction)
+  subroutine advecX_calc_Mprime6(dt,Vx,scal3D,gs)
 
     ! Input/Output
     real(WP), intent(in)                                                :: dt
@@ -290,43 +290,43 @@ subroutine advecX_calc_Mprime6(dt,Vx,scal3D,gs)
     ind_group = 0
 
     do k = 1, N_proc(3), gs(2)
-        ind_group(2) = ind_group(2) + 1
-        ind_group(1) = 0
-        do j = 1, N_proc(2), gs(1)
-            ind_group(1) = ind_group(1) + 1
+       ind_group(2) = ind_group(2) + 1
+       ind_group(1) = 0
+       do j = 1, N_proc(2), gs(1)
+          ind_group(1) = ind_group(1) + 1
 
-            ! ===== Init particles =====
-            call advecX_init_group(Vx, j, k, gs, p_pos_adim, p_V)
+          ! ===== Init particles =====
+          call advecX_init_group(Vx, j, k, gs, p_pos_adim, p_V)
 
-            ! ===== Advection =====
-            ! -- Compute velocity (with a RK2 scheme) --
-            call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
-            ! -- Advec particles --
-            p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+          ! ===== Advection =====
+          ! -- Compute velocity (with a RK2 scheme) --
+          call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
+          ! -- Advec particles --
+          p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
 
-            ! ===== Remeshing =====
-            ! -- Remeshing --
-            call Xremesh_Mprime6(ind_group, gs, p_pos_adim, j,k,scal3D)
+          ! ===== Remeshing =====
+          ! -- Remeshing --
+          call Xremesh_Mprime6(ind_group, gs, p_pos_adim, j,k,scal3D)
 
-        end do
+       end do
     end do
 
 
-end subroutine advecX_calc_Mprime6
+  end subroutine advecX_calc_Mprime6
 
 
-! ====================================================================
-! ====================   Remeshing subroutines    ====================
-! ====================================================================
+  ! ====================================================================
+  ! ====================   Remeshing subroutines    ====================
+  ! ====================================================================
 
-!> remeshing with an order 2 method, corrected to allow large CFL number - untagged particles
-!!    @param[in]        ind_group   = coordinate of the current group of lines
-!!    @param[in]        p_pos_adim  = adimensionned  particles position
-!!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
-!!    @param[in]        bl_tag      = contains information about bloc (is it tagged ?)
-!!    @param[in]        j,k         = indice of of the current line (x-coordinate and z-coordinate)
-!!    @param[in,out]    scal        = scalar field to advect
-subroutine Xremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,j,k,scal)
+  !> remeshing with an order 2 method, corrected to allow large CFL number - untagged particles
+  !!    @param[in]        ind_group   = coordinate of the current group of lines
+  !!    @param[in]        p_pos_adim  = adimensionned  particles position
+  !!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
+  !!    @param[in]        bl_tag      = contains information about bloc (is it tagged ?)
+  !!    @param[in]        j,k         = index of the current line (y-coordinate and z-coordinate)
+  !!    @param[in,out]    scal        = scalar field to advect
+  subroutine Xremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,j,k,scal)
 
     ! Input/Output
     integer, dimension(2), intent(in)                                   :: ind_group
@@ -339,25 +339,25 @@ subroutine Xremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,j,k,scal)
     ! Variable used to remesh particles in a buffer
     real(WP),dimension(:),allocatable   :: send_buffer  ! buffer use to remesh the scalar before to send it to the right subdomain
     integer, dimension(2)               :: rece_proc    ! minimal and maximal gap between my Y-coordinate and the one from which 
-                                                        ! I will receive data
+    ! I will receive data
     integer                             :: proc_min     ! smaller gap between me and the processes to where I send data
     integer                             :: proc_max     ! smaller gap between me and the processes to where I send data
-    
+
 
     !  -- Compute ranges --
     if (bl_type(1)) then
-        ! First particle is a centered one
-        send_j_min = nint(p_pos_adim(1))-1
+       ! First particle is a centered one
+       send_j_min = nint(p_pos_adim(1))-1
     else
-        ! First particle is a left one
-        send_j_min = floor(p_pos_adim(1))-1
+       ! First particle is a left one
+       send_j_min = floor(p_pos_adim(1))-1
     end if
     if (bl_type(N_proc(direction)/bl_size +1)) then
-        ! Last particle is a centered one
-        send_j_max = nint(p_pos_adim(N_proc(direction)))+1
+       ! Last particle is a centered one
+       send_j_max = nint(p_pos_adim(N_proc(direction)))+1
     else
-        ! Last particle is a left one
-        send_j_max = floor(p_pos_adim(N_proc(direction)))+1
+       ! Last particle is a left one
+       send_j_max = floor(p_pos_adim(N_proc(direction)))+1
     end if
 
     ! -- Determine the communication needed : who will communicate whit who ? (ie compute sender and recer) --
@@ -369,7 +369,7 @@ subroutine Xremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,j,k,scal)
 
     ! -- Remesh the particles in the buffer --
     call AC_remesh_lambda2corrected(direction, p_pos_adim, scal(:,j,k), bl_type, bl_tag, send_j_min, send_j_max, send_buffer)
-    
+
     ! -- Send the buffer to the matching processus and update the scalar field --
     scal(:,j,k) = 0
     call AC_bufferToScalar(direction, ind_group , send_j_min, send_j_max, proc_min, proc_max, rece_proc, send_buffer, scal(:,j,k))
@@ -377,17 +377,17 @@ subroutine Xremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,j,k,scal)
     ! Deallocate all field
     deallocate(send_buffer)
 
-end subroutine Xremesh_O2
+  end subroutine Xremesh_O2
 
-!> remeshing with an order 2 method, corrected to allow large CFL number - group version
-!!    @param[in]        ind_group   = coordinate of the current group of lines
-!!    @param[in]        gs          = size of groups (along X direction)
-!!    @param[in]        p_pos_adim  = adimensionned  particles position
-!!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
-!!    @param[in]        bl_tag      = contains information about bloc (is it tagged ?)
-!!    @param[in]        j,k         = indice of of the current line (x-coordinate and z-coordinate)
-!!    @param[in,out]    scal        = scalar field to advect
-subroutine Xremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag,j,k,scal)
+  !> remeshing with an order 2 method, corrected to allow large CFL number - group version
+  !!    @param[in]        ind_group   = coordinate of the current group of lines
+  !!    @param[in]        gs          = size of groups (along X direction)
+  !!    @param[in]        p_pos_adim  = adimensionned  particles position
+  !!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
+  !!    @param[in]        bl_tag      = contains information about bloc (is it tagged ?)
+  !!    @param[in]        j,k         = index of the current line (y-coordinate and z-coordinate)
+  !!    @param[in,out]    scal        = scalar field to advect
+  subroutine Xremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag,j,k,scal)
 
     ! Input/Output
     integer, dimension(2), intent(in)                                   :: ind_group
@@ -399,29 +399,29 @@ subroutine Xremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag,j,k,scal)
     real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal
     ! Other local variables 
     integer                                 :: proc_gap, gap! distance between my (mpi) coordonate and coordinate of the 
-                                                                ! processus associated to a given position
+    ! processus associated to a given position
     integer, dimension(gs(1),gs(2),2)       :: send_gap     ! distance between me and processus wich send me information
     integer, dimension(2)                   :: send_gap_abs ! min (resp max) value of rece_gap(:,:,i) with i=1 (resp 2)
     integer, dimension(:), allocatable      :: send_rank    ! rank of processus to wich I send information
     integer, dimension(2 , 2)               :: rece_gap     ! distance between me and processus to wich I send information
     integer                                 :: ind   ! indices
     integer, dimension(:,:), allocatable    :: cartography  ! cartography(proc_gap) contains the set of the lines indice in the block to wich the 
-                                                            ! current processus will send data during remeshing and for each of these lines the range 
-                                                            ! of mesh points from where it requiers the velocity values.
+    ! current processus will send data during remeshing and for each of these lines the range 
+    ! of mesh points from where it requiers the velocity values.
     integer, dimension(:,:), allocatable    :: rece_carto   ! same as abobve but for what I receive
     integer                                 :: min_size     ! minimal size of cartography(:,proc_gap)
     integer                                 :: max_size     ! maximal size of cartography(:,proc_gap)
     integer                                 :: ind_for_i1   ! where to read the first coordinate (i1) of the current line inside the cartography ?
     ! Variable used to remesh particles in a buffer
     real(WP),dimension(:),allocatable,target:: send_buffer  ! buffer use to remesh the scalar before to send it to the right subdomain
-                                                            ! sorted by receivers and not by coordinate.
+    ! sorted by receivers and not by coordinate.
     integer, dimension(:), allocatable      :: pos_in_buffer! buffer size
     type(real_pter),dimension(:),allocatable:: remesh_pter  ! pointer to send buffer in which scalar are sorted by line indice.
-                                                            ! sorted by receivers
-        
+    ! sorted by receivers
+
     ! I will receive data
-!    integer, dimension(gs(1),gs(2))         :: proc_min     ! smaller gap between me and the processes to where I send data
-!    integer, dimension(gs(1),gs(2))         :: proc_max     ! smaller gap between me and the processes to where I send data
+    !    integer, dimension(gs(1),gs(2))         :: proc_min     ! smaller gap between me and the processes to where I send data
+    !    integer, dimension(gs(1),gs(2))         :: proc_max     ! smaller gap between me and the processes to where I send data
 
     integer                                 :: i1, i2       ! indice of a line into the group
     ! Variable use to manage mpi communications
@@ -429,21 +429,21 @@ subroutine Xremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag,j,k,scal)
     integer, dimension(:), allocatable      :: s_request_range! mpi communication request (handle) of nonblocking send
     integer                                 :: tag          ! mpi message tag
     integer                                 :: ierr         ! mpi error code
-    
+
     !  -- Compute ranges --
     where (bl_type(1,:,:))
-        ! First particle is a centered one
-        send_group_min = nint(p_pos_adim(1,:,:))-1
+       ! First particle is a centered one
+       send_group_min = nint(p_pos_adim(1,:,:))-1
     elsewhere
-        ! First particle is a left one
-        send_group_min = floor(p_pos_adim(1,:,:))-1
+       ! First particle is a left one
+       send_group_min = floor(p_pos_adim(1,:,:))-1
     end where
     where (bl_type(N_proc(direction)/bl_size +1,:,:))
-        ! Last particle is a centered one
-        send_group_max = nint(p_pos_adim(N_proc(direction),:,:))+1
+       ! Last particle is a centered one
+       send_group_max = nint(p_pos_adim(N_proc(direction),:,:))+1
     elsewhere
-        ! Last particle is a left one
-        send_group_max = floor(p_pos_adim(N_proc(direction),:,:))+1
+       ! Last particle is a left one
+       send_group_max = floor(p_pos_adim(N_proc(direction),:,:))+1
     end where
 
     ! ===== Determine the communication needed : who will communicate whit who ? (ie compute sender and recer) =====
@@ -466,39 +466,39 @@ subroutine Xremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag,j,k,scal)
     allocate(s_request_range(send_gap_abs(1):send_gap_abs(2)))
     min_size = 2 + gs(2)
     do proc_gap = send_gap_abs(1), send_gap_abs(2)
-        cartography(1,proc_gap) = 0
-        ! Use the cartography to know which lines are concerned
-        com_size = cartography(2,proc_gap)
-        ! Range I want - store into the cartography
-        gap = proc_gap*N_proc(direction)
-        ! Position in cartography(:,proc_gap) of the current i1 indice
-        ind_for_i1 = min_size
-        do i2 = 1, gs(2)
-            do ind = ind_for_i1+1, ind_for_i1 + cartography(2+i2,proc_gap), 2
-                do i1 = cartography(ind,proc_gap), cartography(ind+1,proc_gap)
-                    ! Interval start from:
-                    cartography(com_size+1,proc_gap) = max(send_group_min(i1,i2), gap+1) ! fortran => indice start from 0
-                    ! and ends at:
-                    cartography(com_size+2,proc_gap) = min(send_group_max(i1,i2), gap+N_proc(direction))
-                    ! update number of element to send
-                    cartography(1,proc_gap) = cartography(1,proc_gap) &
-                                & + cartography(com_size+2,proc_gap) &
-                                & - cartography(com_size+1,proc_gap) + 1
-                    com_size = com_size+2
-                end do
-            end do
-            ind_for_i1 = ind_for_i1 + cartography(2+i2,proc_gap)
-        end do
-        ! Tag = concatenation of (rank+1), ind_group(1), ind_group(2), direction et unique Id.                          
-        tag = compute_tag(ind_group, tag_velo_range, direction, proc_gap)
-        ! Send message
-        if (send_rank(proc_gap) /= D_rank(direction)) then
-            call mpi_Isend(cartography(1,proc_gap), com_size, MPI_INTEGER, send_rank(proc_gap), tag, &
-                & D_comm(direction), s_request_range(proc_gap),ierr)
-        else
-            ! No communication needed, I copy it directly !
-            rece_carto(:,-proc_gap) = cartography(1,proc_gap)
-        end if
+       cartography(1,proc_gap) = 0
+       ! Use the cartography to know which lines are concerned
+       com_size = cartography(2,proc_gap)
+       ! Range I want - store into the cartography
+       gap = proc_gap*N_proc(direction)
+       ! Position in cartography(:,proc_gap) of the current i1 indice
+       ind_for_i1 = min_size
+       do i2 = 1, gs(2)
+          do ind = ind_for_i1+1, ind_for_i1 + cartography(2+i2,proc_gap), 2
+             do i1 = cartography(ind,proc_gap), cartography(ind+1,proc_gap)
+                ! Interval start from:
+                cartography(com_size+1,proc_gap) = max(send_group_min(i1,i2), gap+1) ! fortran => indice start from 0
+                ! and ends at:
+                cartography(com_size+2,proc_gap) = min(send_group_max(i1,i2), gap+N_proc(direction))
+                ! update number of element to send
+                cartography(1,proc_gap) = cartography(1,proc_gap) &
+                     & + cartography(com_size+2,proc_gap) &
+                     & - cartography(com_size+1,proc_gap) + 1
+                com_size = com_size+2
+             end do
+          end do
+          ind_for_i1 = ind_for_i1 + cartography(2+i2,proc_gap)
+       end do
+       ! Tag = concatenation of (rank+1), ind_group(1), ind_group(2), direction and unique Id.
+       tag = compute_tag(ind_group, tag_velo_range, direction, proc_gap)
+       ! Send message
+       if (send_rank(proc_gap) /= D_rank(direction)) then
+          call mpi_Isend(cartography(1,proc_gap), com_size, MPI_INTEGER, send_rank(proc_gap), tag, &
+               & D_comm(direction), s_request_range(proc_gap),ierr)
+       else
+          ! No communication needed, I copy it directly !
+          rece_carto(:,-proc_gap) = cartography(1,proc_gap)
+       end if
     end do
 
     ! ===== Initialize the general buffer =====
@@ -509,67 +509,66 @@ subroutine Xremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag,j,k,scal)
     allocate(pos_in_buffer(send_gap_abs(1):send_gap_abs(2)))
     pos_in_buffer(send_gap_abs(1)) = 1
     do proc_gap = send_gap_abs(1), send_gap_abs(2)-1
-        pos_in_buffer(proc_gap+1)= pos_in_buffer(proc_gap) + cartography(1,proc_gap)
+       pos_in_buffer(proc_gap+1)= pos_in_buffer(proc_gap) + cartography(1,proc_gap)
     end do
     allocate(send_buffer(pos_in_buffer(send_gap_abs(2)) &
-                & + cartography(1,send_gap_abs(2))))
-
-! XXX OPTIM : début de l'optim en court
-!   ! -- Allocate the buffer use to remesh all the lines --
-!   allocate(begin_proc(proc_min_abs:proc_max_abs))
-!   begin_proc(proc_min_abs) = 1
-!   do proc_gap = (proc_min_abs+1), proc_max_abs
-!       begin_proc(proc_gap) = begin_proc(proc_gap-1) + size_per_proc(proc_gap)
-!   end do
-!   allocate(send_buffer(begin_proc(proc_max_abs) + size_per_proc(proc_max_abs)-1))
-!   send_buffer = 0.0
-! XXX OPTIM : fin du codage en court
+         & + cartography(1,send_gap_abs(2))))
+
+    ! XXX OPTIM : début de l'optim en court
+    !   ! -- Allocate the buffer use to remesh all the lines --
+    !   allocate(begin_proc(proc_min_abs:proc_max_abs))
+    !   begin_proc(proc_min_abs) = 1
+    !   do proc_gap = (proc_min_abs+1), proc_max_abs
+    !       begin_proc(proc_gap) = begin_proc(proc_gap-1) + size_per_proc(proc_gap)
+    !   end do
+    !   allocate(send_buffer(begin_proc(proc_max_abs) + size_per_proc(proc_max_abs)-1))
+    !   send_buffer = 0.0
+    ! XXX OPTIM : fin du codage en court
 
     ! ===== Remeshing into the buffer by using pointer array =====
     do i2 = 1, gs(2)
-        do i1 = 1, gs(1)
-            send_j_min = send_group_min(i1,i2)
-            send_j_max = send_group_max(i1,i2)
-
-            ! -- Allocate remesh_pter --
-            allocate(remesh_pter(send_j_min:send_j_max))
-            do ind = send_j_min, send_j_max
-                proc_gap = floor(real(ind-1)/N_proc(direction))
-                remesh_pter(ind)%pter => send_buffer(pos_in_buffer(proc_gap))
-                pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
-            end do
-
-            ! -- Remesh the particles in the buffer --
-! XXX TODO
-        ! surcharge de AC_remesh pour prendre un tableau de real_pter
-            call AC_remesh_lambda2corrected(direction, p_pos_adim(:,i1,i2), scal(:,j+i1-1,k+i2-1), &
-                & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, send_j_max, remesh_pter)
-            
-            ! -- Send the buffer to the matching processus and update the scalar field --
-            scal(:,j+i1-1,k+i2-1) = 0
-! Communiquer : envoyer à chacun sa partie du buffer et mettre à jour le
-! scalaire en utilisant la rece_carto
-!            çcall AC_bufferToScalar(direction, ind_group , send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
-!                & rece_proc(:,i1,i2), send_buffer, scal(:,j+i1-1,k+i2-1))
-! XXX End TODO
-
-            ! Deallocate all field
-            deallocate(send_buffer)
-
+       do i1 = 1, gs(1)
+          send_j_min = send_group_min(i1,i2)
+          send_j_max = send_group_max(i1,i2)
+
+          ! -- Allocate remesh_pter --
+          allocate(remesh_pter(send_j_min:send_j_max))
+          do ind = send_j_min, send_j_max
+             proc_gap = floor(real(ind-1)/N_proc(direction))
+             remesh_pter(ind)%pter => send_buffer(pos_in_buffer(proc_gap))
+             pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
+          end do
+
+          ! -- Remesh the particles in the buffer --
+          ! XXX TODO
+          ! surcharge de AC_remesh pour prendre un tableau de real_pter
+          call AC_remesh_lambda2corrected(direction, p_pos_adim(:,i1,i2), scal(:,j+i1-1,k+i2-1), &
+               & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, send_j_max, remesh_pter)
+
+          ! -- Send the buffer to the matching processus and update the scalar field --
+          scal(:,j+i1-1,k+i2-1) = 0
+          ! Communiquer : envoyer à chacun sa partie du buffer et mettre à jour le
+          ! scalaire en utilisant la rece_carto
+          !            çcall AC_bufferToScalar(direction, ind_group , send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+          !                & rece_proc(:,i1,i2), send_buffer, scal(:,j+i1-1,k+i2-1))
+          ! XXX End TODO
+          deallocate(remesh_pter)
         end do
     end do
-
-end subroutine Xremesh_O2_group
-
-!> remeshing with an order 4 method, corrected to allow large CFL number - untagged particles
-!!    @param[in]        ind_group   = coordinate of the current group of lines
-!!    @param[in]        gs          = size of groups (along X direction)
-!!    @param[in]        p_pos_adim  = adimensionned particles position
-!!    @param[in]        bl_tag      = contains information about block (is it tagged ?)
-!!    @param[in]        j,k         = indice of of the current line (x-coordinate and z-coordinate)
-!!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
-!!    @param[in,out]    scal          = scalar field to advect
-subroutine Xremesh_O4(ind_group, gs, p_pos_adim, bl_type, bl_tag,j,k,scal)
+    ! Deallocate all field
+    deallocate(send_buffer)
+ 
+  end subroutine Xremesh_O2_group
+
+  !> remeshing with an order 4 method, corrected to allow large CFL number - untagged particles
+  !!    @param[in]        ind_group   = coordinate of the current group of lines
+  !!    @param[in]        gs          = size of groups (along X direction)
+  !!    @param[in]        p_pos_adim  = adimensionned particles position
+  !!    @param[in]        bl_tag      = contains information about block (is it tagged ?)
+  !!    @param[in]        j,k         = indice of of the current line (x-coordinate and z-coordinate)
+  !!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
+  !!    @param[in,out]    scal          = scalar field to advect
+  subroutine Xremesh_O4(ind_group, gs, p_pos_adim, bl_type, bl_tag,j,k,scal)
 
     ! Input/Output
     integer, dimension(2), intent(in)                                   :: ind_group
@@ -580,30 +579,30 @@ subroutine Xremesh_O4(ind_group, gs, p_pos_adim, bl_type, bl_tag,j,k,scal)
     logical, dimension(bl_nb(direction),gs(1),gs(2)), intent(in)        :: bl_tag       ! indice of tagged particles
     real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal
     ! Other local variables 
-        ! Variables used to remesh particles ...
-        ! ... and to communicate between subdomains. A variable prefixed by "send_"(resp "rece")
-        ! designes something I send (resp. I receive).
+    ! Variables used to remesh particles ...
+    ! ... and to communicate between subdomains. A variable prefixed by "send_"(resp "rece")
+    ! designes something I send (resp. I receive).
     real(WP),dimension(:),allocatable   :: send_buffer  ! buffer use to remesh the scalar before to send it to the right subdomain
     integer, dimension(2,gs(1),gs(2))   :: rece_proc    ! minimal and maximal gap between my Y-coordinate and the one from which 
-                                                        ! I will receive data
+    ! I will receive data
     integer, dimension(gs(1),gs(2))     :: proc_min     ! smaller gap between me and the processes to where I send data
     integer, dimension(gs(1),gs(2))     :: proc_max     ! smaller gap between me and the processes to where I send data
     integer                             :: i1, i2       ! indice of a line into the group
 
     !  -- Compute ranges --
     where (bl_type(1,:,:))
-        ! First particle is a centered one
-        send_group_min = nint(p_pos_adim(1,:,:))-2
+       ! First particle is a centered one
+       send_group_min = nint(p_pos_adim(1,:,:))-2
     elsewhere
-        ! First particle is a left one
-        send_group_min = floor(p_pos_adim(1,:,:))-2
+       ! First particle is a left one
+       send_group_min = floor(p_pos_adim(1,:,:))-2
     end where
     where (bl_type(N_proc(direction)/bl_size +1,:,:))
-        ! Last particle is a centered one
-        send_group_max = nint(p_pos_adim(N_proc(direction),:,:))+2
+       ! Last particle is a centered one
+       send_group_max = nint(p_pos_adim(N_proc(direction),:,:))+2
     elsewhere
-        ! Last particle is a left one
-        send_group_max = floor(p_pos_adim(N_proc(direction),:,:))+2
+       ! Last particle is a left one
+       send_group_max = floor(p_pos_adim(N_proc(direction),:,:))+2
     end where
 
     ! -- Determine the communication needed : who will communicate whit who ? (ie compute sender and recer) --
@@ -611,39 +610,39 @@ subroutine Xremesh_O4(ind_group, gs, p_pos_adim, bl_type, bl_tag,j,k,scal)
     !call AC_obtain_senders(direction, gs, ind_group, proc_min, proc_max, proc_min_abs, proc_max_abs, rece_proc)
 
     do i2 = 1, gs(2)
-        do i1 = 1, gs(1)
-            send_j_min = send_group_min(i1,i2)
-            send_j_max = send_group_max(i1,i2)
-
-            ! -- Allocate buffer for remeshing of local particles --
-            allocate(send_buffer(send_j_min:send_j_max))
-            send_buffer = 0.0;
-            
-            ! -- Remesh the particles in the buffer --
-            call AC_remesh_lambda4corrected(direction, p_pos_adim(:,i1,i2), scal(:,j+i1-1,k+i2-1), &
-                & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, send_j_max, send_buffer)
-            
-            ! -- Send the buffer to the matching processus and update the scalar field --
-            scal(:,j+i1-1,k+i2-1) = 0
-            call AC_bufferToScalar(direction, ind_group, send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
-                & rece_proc(:,i1,i2), send_buffer, scal(:,j+i1-1,k+i2-1))
-
-            ! Deallocate all field
-            deallocate(send_buffer)
+       do i1 = 1, gs(1)
+          send_j_min = send_group_min(i1,i2)
+          send_j_max = send_group_max(i1,i2)
 
-        end do
+          ! -- Allocate buffer for remeshing of local particles --
+          allocate(send_buffer(send_j_min:send_j_max))
+          send_buffer = 0.0;
+
+          ! -- Remesh the particles in the buffer --
+          call AC_remesh_lambda4corrected(direction, p_pos_adim(:,i1,i2), scal(:,j+i1-1,k+i2-1), &
+               & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, send_j_max, send_buffer)
+
+          ! -- Send the buffer to the matching processus and update the scalar field --
+          scal(:,j+i1-1,k+i2-1) = 0
+          call AC_bufferToScalar(direction, ind_group, send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+               & rece_proc(:,i1,i2), send_buffer, scal(:,j+i1-1,k+i2-1))
+
+          ! Deallocate all field
+          deallocate(send_buffer)
+
+       end do
     end do
 
-end subroutine Xremesh_O4
+  end subroutine Xremesh_O4
 
 
-!> remeshing with M'6 formula - No tag neither correction for large time steps.
-!!    @param[in]        ind_group   = coordinate of the current group of lines
-!!    @param[in]        gs          = size of groups (along X direction)
-!!    @param[in]        p_pos_adim  = adimensionned particles position
-!!    @param[in]        j,k         = indice of of the current line (y-coordinate and z-coordinate)
-!!    @param[in,out]    scal        = scalar field to advect
-subroutine Xremesh_Mprime6(ind_group, gs, p_pos_adim, j,k,scal)
+  !> remeshing with M'6 formula - No tag neither correction for large time steps.
+  !!    @param[in]        ind_group   = coordinate of the current group of lines
+  !!    @param[in]        gs          = size of groups (along X direction)
+  !!    @param[in]        p_pos_adim  = adimensionned particles position
+  !!    @param[in]        j,k         = indice of of the current line (y-coordinate and z-coordinate)
+  !!    @param[in,out]    scal        = scalar field to advect
+  subroutine Xremesh_Mprime6(ind_group, gs, p_pos_adim, j,k,scal)
 
     ! Input/output
     integer, dimension(2), intent(in)                                   :: ind_group
@@ -652,12 +651,12 @@ subroutine Xremesh_Mprime6(ind_group, gs, p_pos_adim, j,k,scal)
     real(WP), dimension(N_proc(direction),gs(1),gs(2)), intent(in)      :: p_pos_adim   ! adimensionned particles position 
     real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal
     ! Other local variables 
-        ! Variables used to remesh particles ...
-        ! ... and to communicate between subdomains. A variable prefixed by "send_"(resp "rece")
-        ! designes something I send (resp. I receive).
+    ! Variables used to remesh particles ...
+    ! ... and to communicate between subdomains. A variable prefixed by "send_"(resp "rece")
+    ! designes something I send (resp. I receive).
     real(WP),dimension(:),allocatable   :: send_buffer  ! buffer use to remesh the scalar before to send it to the right subdomain
     integer, dimension(2,gs(1),gs(2))   :: rece_proc    ! minimal and maximal gap between my Y-coordinate and the one from which 
-                                                        ! I will receive data
+    ! I will receive data
     integer, dimension(gs(1),gs(2))     :: proc_min     ! smaller gap between me and the processes to where I send data
     integer, dimension(gs(1),gs(2))     :: proc_max     ! smaller gap between me and the processes to where I send data
     integer                             :: i1, i2       ! indice of a line into the group
@@ -671,44 +670,44 @@ subroutine Xremesh_Mprime6(ind_group, gs, p_pos_adim, j,k,scal)
     call AC_obtain_senders(direction, gs, ind_group, proc_min, proc_max, rece_proc, 1)
 
     do i2 = 1, gs(2)
-        do i1 = 1, gs(1)
-            send_j_min = send_group_min(i1,i2)
-            send_j_max = send_group_max(i1,i2)
+       do i1 = 1, gs(1)
+          send_j_min = send_group_min(i1,i2)
+          send_j_max = send_group_max(i1,i2)
 
-            ! -- Allocate buffer for remeshing of local particles --
-            allocate(send_buffer(send_j_min:send_j_max))
-            send_buffer = 0.0;
+          ! -- Allocate buffer for remeshing of local particles --
+          allocate(send_buffer(send_j_min:send_j_max))
+          send_buffer = 0.0;
 
-            ! -- Remesh the particles in the buffer --
-            do i = 1, N_proc(direction), 1
-                call AC_remesh_Mprime6(p_pos_adim(i,i1,i2),scal(i,j+i1-1,k+i2-1), send_buffer)
-            end do
+          ! -- Remesh the particles in the buffer --
+          do i = 1, N_proc(direction), 1
+             call AC_remesh_Mprime6(p_pos_adim(i,i1,i2),scal(i,j+i1-1,k+i2-1), send_buffer)
+          end do
 
-            ! -- Send the buffer to the matching processus and update the scalar field --
-            scal(:,j+i1-1,k+i2-1) = 0
-            call AC_bufferToScalar(direction, ind_group, send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
-                & rece_proc(:,i1,i2), send_buffer, scal(:,j+i1-1,k+i2-1))
+          ! -- Send the buffer to the matching processus and update the scalar field --
+          scal(:,j+i1-1,k+i2-1) = 0
+          call AC_bufferToScalar(direction, ind_group, send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+               & rece_proc(:,i1,i2), send_buffer, scal(:,j+i1-1,k+i2-1))
 
-            ! Deallocate all field
-            deallocate(send_buffer)
+          ! Deallocate all field
+          deallocate(send_buffer)
 
-        end do
+       end do
     end do
 
-end subroutine Xremesh_Mprime6
+  end subroutine Xremesh_Mprime6
 
 
-! ====================================================================
-! ====================    Initialize particle     ====================
-! ====================================================================
+  ! ====================================================================
+  ! ====================    Initialize particle     ====================
+  ! ====================================================================
 
-!> Creation and initialisation of a particle line (ie Y and Z coordinate are fixed)
-!!    @param[in]    Vx          = 3D velocity field
-!!    @param[in]    j           = Y-indice of the current line
-!!    @param[in]    k           = Z-indice of the current line
-!!    @param[out]   p_pos_adim  = adimensioned particles postion
-!!    @param[out]   p_V         = particle velocity 
-subroutine advecX_init_line(Vx, j, k, p_pos_adim, p_V)
+  !> Creation and initialisation of a particle line (ie Y and Z coordinate are fixed)
+  !!    @param[in]    Vx          = 3D velocity field
+  !!    @param[in]    j           = Y-indice of the current line
+  !!    @param[in]    k           = Z-indice of the current line
+  !!    @param[out]   p_pos_adim  = adimensioned particles postion
+  !!    @param[out]   p_V         = particle velocity 
+  subroutine advecX_init_line(Vx, j, k, p_pos_adim, p_V)
 
     ! Input/Output
     integer, intent(in)                                 :: j,k
@@ -718,21 +717,21 @@ subroutine advecX_init_line(Vx, j, k, p_pos_adim, p_V)
     integer                                     :: ind          ! indice
 
     do ind = 1, N_proc(direction)
-        p_pos_adim(ind) = ind
-        p_V(ind)        = Vx(ind,j,k)
+       p_pos_adim(ind) = ind
+       p_V(ind)        = Vx(ind,j,k)
     end do
 
-end subroutine advecX_init_line
+  end subroutine advecX_init_line
 
 
-!> Creation and initialisation of a group of particle line
-!!    @param[in]    Vx          = 3D velocity field
-!!    @param[in]    j           = Y-indice of the current line
-!!    @param[in]    k           = Z-indice of the current line
-!!    @param[in]    Gsize       = size of groups (along X direction)
-!!    @param[out]   p_pos_adim  = adimensioned particles postion
-!!    @param[out]   p_V         = particle velocity 
-subroutine advecX_init_group(Vx, j, k, Gsize, p_pos_adim, p_V)
+  !> Creation and initialisation of a group of particle line
+  !!    @param[in]    Vx          = 3D velocity field
+  !!    @param[in]    j           = Y-indice of the current line
+  !!    @param[in]    k           = Z-indice of the current line
+  !!    @param[in]    Gsize       = size of groups (along X direction)
+  !!    @param[out]   p_pos_adim  = adimensioned particles postion
+  !!    @param[out]   p_V         = particle velocity 
+  subroutine advecX_init_group(Vx, j, k, Gsize, p_pos_adim, p_V)
 
     ! Input/Output
     integer, intent(in)                                                     :: j,k
@@ -744,15 +743,15 @@ subroutine advecX_init_group(Vx, j, k, Gsize, p_pos_adim, p_V)
     integer                                     :: j_gp, k_gp   ! Y and Z indice of the current line in the group
 
     do k_gp = 1, Gsize(2)
-        do j_gp = 1, Gsize(1)
-            do ind = 1, N_proc(direction)
-                p_pos_adim(ind, j_gp, k_gp) = ind
-                p_V(ind, j_gp, k_gp)        = Vx(ind,j+j_gp-1,k+k_gp-1)
-            end do
-        end do
+       do j_gp = 1, Gsize(1)
+          do ind = 1, N_proc(direction)
+             p_pos_adim(ind, j_gp, k_gp) = ind
+             p_V(ind, j_gp, k_gp)        = Vx(ind,j+j_gp-1,k+k_gp-1)
+          end do
+       end do
     end do
 
-end subroutine advecX_init_group
+  end subroutine advecX_init_group
 
 end module advecX
 !> @}
diff --git a/HySoP/src/Unstable/LEGI/src/particles/advecY.f90 b/HySoP/src/Unstable/LEGI/src/particles/advecY.f90
index 2a2e3f24f..62ebd56ef 100644
--- a/HySoP/src/Unstable/LEGI/src/particles/advecY.f90
+++ b/HySoP/src/Unstable/LEGI/src/particles/advecY.f90
@@ -30,58 +30,58 @@
 
 module advecY
 
-    use precision
-    use advec_common
-
-    implicit none
-
-    ! ===== Public procedures =====
-    !> Generique procedure to advect the scalar with a particles solver
-    public                  :: advecY_calc
-    !----- (corrected) Remeshing method (these methods are set to public in validation purposes) -----
-    public                  :: Yremesh_O2       ! order 2
-    public                  :: Yremesh_O4       ! order 4
-    ! ----- Other remeshing formula -----
-    public                  :: Yremesh_Mprime6  
-
-    ! ===== Private porcedures =====
-    !> particles solver with remeshing method at order 2
-    private                 :: advecY_calc_line     ! remeshing method at order 2
-    private                 :: advecY_calc_group    ! remeshing method at order 2 - for a group of lines
-    private                 :: advecY_calc_Mprime6  ! M'6 remeshing method
-    ! Particles initialisation
-    private                 :: advecY_init      ! generic initialization of particle position, velocity.
-    private                 :: advecY_init_line ! initialisation for only one line of particles
-    private                 :: advecY_init_group! initialisation for a group of line of particles
-
-    ! ===== Private variables =====
-    !> current direction = alongY (to avoid redefinition and make more easy cut/paste)
-    integer, parameter, private      :: direction = 2
-
-    interface advecY_init
-        module procedure advecY_init_line, advecY_init_group
-    end interface advecY_init
+  use precision
+  use advec_remeshing_formula
+
+  implicit none
+
+  ! ===== Public procedures =====
+  !> Generique procedure to advect the scalar with a particles solver
+  public                  :: advecY_calc
+  !----- (corrected) Remeshing method (these methods are set to public in validation purposes) -----
+  public                  :: Yremesh_O2       ! order 2
+  public                  :: Yremesh_O4       ! order 4
+  ! ----- Other remeshing formula -----
+  public                  :: Yremesh_Mprime6  
+
+  ! ===== Private porcedures =====
+  !> particles solver with remeshing method at order 2
+  private                 :: advecY_calc_line     ! remeshing method at order 2
+  private                 :: advecY_calc_group    ! remeshing method at order 2 - for a group of lines
+  private                 :: advecY_calc_Mprime6  ! M'6 remeshing method
+  ! Particles initialisation
+  private                 :: advecY_init      ! generic initialization of particle position, velocity.
+  private                 :: advecY_init_line ! initialisation for only one line of particles
+  private                 :: advecY_init_group! initialisation for a group of line of particles
+
+  ! ===== Private variables =====
+  !> current direction = alongY (to avoid redefinition and make more easy cut/paste)
+  integer, parameter, private      :: direction = 2
+
+  interface advecY_init
+     module procedure advecY_init_line, advecY_init_group
+  end interface advecY_init
 
 contains
 
 
-! #####################################################################################
-! #####                                                                           #####
-! #####                         Public procedure                                  #####
-! #####                                                                           #####
-! #####################################################################################
+  ! #####################################################################################
+  ! #####                                                                           #####
+  ! #####                         Public procedure                                  #####
+  ! #####                                                                           #####
+  ! #####################################################################################
 
-! ====================================================================
-! ====================    Generic solveur         ====================
-! ====================================================================
+  ! ====================================================================
+  ! ====================    Generic solveur         ====================
+  ! ====================================================================
+
+  !> Scalar advection (this procedure call the right solver, depending on the simulation setup) 
+  !!    @param[in]        dt          = time step
+  !!    @param[in]        Vy          = velocity along y (could be discretised on a bigger mesh then the scalar)
+  !!    @param[in,out]    SC          = scalar field to advect
+  !!    @param[in]        type_solver = scheme use for the advection (particle with order 2 or 4)
+  subroutine advecY_calc(dt, Vy, SC, type_solver)
 
-!> Scalar advection (this procedure call the right solver, depending on the simulation setup) 
-!!    @param[in]        dt          = time step
-!!    @param[in]        Vy          = velocity along y (could be discretised on a bigger mesh then the scalar)
-!!    @param[in,out]    SC          = scalar field to advect
-!!    @param[in]        type_solver = scheme use for the advection (particle with order 2 or 4)
-subroutine advecY_calc(dt, Vy, SC, type_solver)
-  
     ! Input/Output
     real(WP), intent(in)                                                :: dt
     real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vy
@@ -96,37 +96,37 @@ subroutine advecY_calc(dt, Vy, SC, type_solver)
 
     ! Call the right solver depending on the space order we want to.
     select case(type_solver)
-        case('p_O2')
-            call advecY_calc_group(dt, Vy, SC, group_size(direction,:))
-            !call advecY_calc_line(dt, Vy, SC)
-        case('p_O4')
-            call advecY_calc_O4(dt, Vy, SC, group_size(direction,:))
-        case('p_M6')
-            call advecY_calc_Mprime6(dt, Vy, SC, group_size(direction,:))
-        case default
-            call advecY_calc_group(dt, Vy, SC,group_size(direction,:))
+    case('p_O2')
+       call advecY_calc_group(dt, Vy, SC, group_size(direction,:))
+       !call advecY_calc_line(dt, Vy, SC)
+    case('p_O4')
+       call advecY_calc_O4(dt, Vy, SC, group_size(direction,:))
+    case('p_M6')
+       call advecY_calc_Mprime6(dt, Vy, SC, group_size(direction,:))
+    case default
+       call advecY_calc_group(dt, Vy, SC,group_size(direction,:))
     end select
 
 
-end subroutine advecY_calc
+  end subroutine advecY_calc
 
 
-! #####################################################################################
-! #####                                                                           #####
-! #####                         Private procedure                                 #####
-! #####                                                                           #####
-! #####################################################################################
+  ! #####################################################################################
+  ! #####                                                                           #####
+  ! #####                         Private procedure                                 #####
+  ! #####                                                                           #####
+  ! #####################################################################################
 
-! ====================================================================
-! ====================    Different solvers       ====================
-! ====================================================================
+  ! ====================================================================
+  ! ====================    Different solvers       ====================
+  ! ====================================================================
 
 
-!> Advection during a time step dt - order 2
-!!    @param[in]        dt      = time step
-!!    @param[in]        Vy      = velocity along y (could be discretised on a bigger mesh then the scalar)
-!!    @param[in,out]    scal3D   = scalar field to advect
-subroutine advecY_calc_line(dt,Vy,scal3D)
+  !> Advection during a time step dt - order 2
+  !!    @param[in]        dt      = time step
+  !!    @param[in]        Vy      = velocity along y (could be discretised on a bigger mesh then the scalar)
+  !!    @param[in,out]    scal3D   = scalar field to advect
+  subroutine advecY_calc_line(dt,Vy,scal3D)
 
     ! input/output
     real(WP), intent(in)                                                :: dt
@@ -142,38 +142,38 @@ subroutine advecY_calc_line(dt,Vy,scal3D)
 
     ind_group = 0
     do k = 1, N_proc(3)
-        ind_group(2) = ind_group(2) + 1
-        ind_group(1) = 0
-        do i = 1, N_proc(1)
-            ind_group(1) = ind_group(1) + 1
-
-            ! ===== Init particles =====
-            call advecY_init(Vy, i, k, p_pos_adim, p_V)
-
-            ! ===== Advection =====
-            ! -- Compute velocity (with a RK2 scheme) --
-            call AC_particle_velocity(dt, direction, ind_group, p_pos_adim, p_V)
-            ! -- Advec particles --
-            p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
-
-            ! ===== Remeshing =====
-            ! -- Pre-Remeshing: Determine blocks type and tag particles --
-            call AC_type_and_block(dt, direction, ind_group, p_V, bl_type, bl_tag)
-            ! -- Remeshing --
-            call Yremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,i,k,scal3D)
-        end do
+       ind_group(2) = ind_group(2) + 1
+       ind_group(1) = 0
+       do i = 1, N_proc(1)
+          ind_group(1) = ind_group(1) + 1
+
+          ! ===== Init particles =====
+          call advecY_init(Vy, i, k, p_pos_adim, p_V)
+
+          ! ===== Advection =====
+          ! -- Compute velocity (with a RK2 scheme) --
+          call AC_particle_velocity(dt, direction, ind_group, p_pos_adim, p_V)
+          ! -- Advec particles --
+          p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+
+          ! ===== Remeshing =====
+          ! -- Pre-Remeshing: Determine blocks type and tag particles --
+          call AC_type_and_block(dt, direction, ind_group, p_V, bl_type, bl_tag)
+          ! -- Remeshing --
+          call Yremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,i,k,scal3D)
+       end do
     end do
 
 
-end subroutine advecY_calc_line
+  end subroutine advecY_calc_line
 
 
-!> Advection during a time step dt - order 2
-!!    @param[in]        dt      = time step
-!!    @param[in]        Vy      = velocity along y (could be discretised on a bigger mesh then the scalar)
-!!    @param[in,out]    scal3D  = scalar field to advect
-!!    @param[in]        gs      = size of group along current direction (ie along Y-axis)
-subroutine advecY_calc_group(dt,Vy,scal3D,gs)
+  !> Advection during a time step dt - order 2
+  !!    @param[in]        dt      = time step
+  !!    @param[in]        Vy      = velocity along y (could be discretised on a bigger mesh then the scalar)
+  !!    @param[in,out]    scal3D  = scalar field to advect
+  !!    @param[in]        gs      = size of group along current direction (ie along Y-axis)
+  subroutine advecY_calc_group(dt,Vy,scal3D,gs)
 
     ! input/output
     real(WP), intent(in)                                                :: dt
@@ -191,38 +191,38 @@ subroutine advecY_calc_group(dt,Vy,scal3D,gs)
     ind_group = 0
 
     do k = 1, N_proc(3), gs(2)
-        ind_group(2) = ind_group(2) + 1
-        ind_group(1) = 0
-        do i = 1, N_proc(1), gs(1)
-            ind_group(1) = ind_group(1) + 1
-
-            ! ===== Init particles =====
-            call advecY_init(Vy, i, k, gs, p_pos_adim, p_V)
-
-            ! ===== Advection =====
-            ! -- Compute velocity (with a RK2 scheme) --
-            call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
-            ! -- Advec particles --
-            p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
-
-            ! ===== Remeshing =====
-            ! -- Pre-Remeshing: Determine blocks type and tag particles --
-            call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
-            ! -- Remeshing --
-            call Yremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag, i,k,scal3D)
-
-        end do
+       ind_group(2) = ind_group(2) + 1
+       ind_group(1) = 0
+       do i = 1, N_proc(1), gs(1)
+          ind_group(1) = ind_group(1) + 1
+
+          ! ===== Init particles =====
+          call advecY_init(Vy, i, k, gs, p_pos_adim, p_V)
+
+          ! ===== Advection =====
+          ! -- Compute velocity (with a RK2 scheme) --
+          call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
+          ! -- Advec particles --
+          p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+
+          ! ===== Remeshing =====
+          ! -- Pre-Remeshing: Determine blocks type and tag particles --
+          call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
+          ! -- Remeshing --
+          call Yremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag, i,k,scal3D)
+
+       end do
     end do
 
-end subroutine advecY_calc_group
+  end subroutine advecY_calc_group
 
 
-!> Advection during a time step dt - order 4
-!!    @param[in]        dt      = time step
-!!    @param[in]        Vy      = velocity along y (could be discretised on a bigger mesh then the scalar)
-!!    @param[in,out]    scal3D  = scalar field to advect
-!!    @param[in]        gs      = size of group along current direction (ie along Y-axis)
-subroutine advecY_calc_O4(dt,Vy,scal3D,gs)
+  !> Advection during a time step dt - order 4
+  !!    @param[in]        dt      = time step
+  !!    @param[in]        Vy      = velocity along y (could be discretised on a bigger mesh then the scalar)
+  !!    @param[in,out]    scal3D  = scalar field to advect
+  !!    @param[in]        gs      = size of group along current direction (ie along Y-axis)
+  subroutine advecY_calc_O4(dt,Vy,scal3D,gs)
 
     ! input/output
     real(WP), intent(in)                                                :: dt
@@ -240,38 +240,38 @@ subroutine advecY_calc_O4(dt,Vy,scal3D,gs)
     ind_group = 0
 
     do k = 1, N_proc(3), gs(2)
-        ind_group(2) = ind_group(2) + 1
-        ind_group(1) = 0
-        do i = 1, N_proc(1), gs(1)
-            ind_group(1) = ind_group(1) + 1
-
-            ! ===== Init particles =====
-            call advecY_init(Vy, i, k, gs, p_pos_adim, p_V)
-
-            ! ===== Advection =====
-            ! -- Compute velocity (with a RK2 scheme) --
-            call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
-            ! -- Advec particles --
-            p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
-
-            ! ===== Remeshing =====
-            ! -- Pre-Remeshing: Determine blocks type and tag particles --
-            call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
-            ! -- Remeshing --
-            call Yremesh_O4(ind_group, gs, p_pos_adim, bl_type, bl_tag, i,k,scal3D)
-
-        end do
+       ind_group(2) = ind_group(2) + 1
+       ind_group(1) = 0
+       do i = 1, N_proc(1), gs(1)
+          ind_group(1) = ind_group(1) + 1
+
+          ! ===== Init particles =====
+          call advecY_init(Vy, i, k, gs, p_pos_adim, p_V)
+
+          ! ===== Advection =====
+          ! -- Compute velocity (with a RK2 scheme) --
+          call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
+          ! -- Advec particles --
+          p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+
+          ! ===== Remeshing =====
+          ! -- Pre-Remeshing: Determine blocks type and tag particles --
+          call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
+          ! -- Remeshing --
+          call Yremesh_O4(ind_group, gs, p_pos_adim, bl_type, bl_tag, i,k,scal3D)
+
+       end do
     end do
 
-end subroutine advecY_calc_O4
+  end subroutine advecY_calc_O4
 
 
-!> Advection during a time step dt - M'6, method without corrections.
-!!    @param[in]        dt      = time step
-!!    @param[in]        Vy      = velocity along y (could be discretised on a bigger mesh then the scalar)
-!!    @param[in,out]    scal3D  = scalar field to advect
-!!    @param[in]        gs      = size of group along current direction (ie along Y-axis)
-subroutine advecY_calc_Mprime6(dt,Vy,scal3D,gs)
+  !> Advection during a time step dt - M'6, method without corrections.
+  !!    @param[in]        dt      = time step
+  !!    @param[in]        Vy      = velocity along y (could be discretised on a bigger mesh then the scalar)
+  !!    @param[in,out]    scal3D  = scalar field to advect
+  !!    @param[in]        gs      = size of group along current direction (ie along Y-axis)
+  subroutine advecY_calc_Mprime6(dt,Vy,scal3D,gs)
 
     ! input/output
     real(WP), intent(in)                                                :: dt
@@ -287,42 +287,42 @@ subroutine advecY_calc_Mprime6(dt,Vy,scal3D,gs)
     ind_group = 0
 
     do k = 1, N_proc(3), gs(2)
-        ind_group(2) = ind_group(2) + 1
-        ind_group(1) = 0
-        do i = 1, N_proc(1), gs(1)
-            ind_group(1) = ind_group(1) + 1
+       ind_group(2) = ind_group(2) + 1
+       ind_group(1) = 0
+       do i = 1, N_proc(1), gs(1)
+          ind_group(1) = ind_group(1) + 1
 
-            ! ===== Init particles =====
-            call advecY_init(Vy, i, k, gs, p_pos_adim, p_V)
+          ! ===== Init particles =====
+          call advecY_init(Vy, i, k, gs, p_pos_adim, p_V)
 
-            ! ===== Advection =====
-            ! -- Compute velocity (with a RK2 scheme) --
-            call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
-            ! -- Advec particles --
-            p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+          ! ===== Advection =====
+          ! -- Compute velocity (with a RK2 scheme) --
+          call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
+          ! -- Advec particles --
+          p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
 
-            ! ===== Remeshing =====
-            ! -- Remeshing --
-            call Yremesh_Mprime6(ind_group, gs, p_pos_adim, i,k,scal3D)
+          ! ===== Remeshing =====
+          ! -- Remeshing --
+          call Yremesh_Mprime6(ind_group, gs, p_pos_adim, i,k,scal3D)
 
-        end do
+       end do
     end do
 
-end subroutine advecY_calc_Mprime6
+  end subroutine advecY_calc_Mprime6
 
 
-! ====================================================================
-! ====================   Remeshing subroutines    ====================
-! ====================================================================
+  ! ====================================================================
+  ! ====================   Remeshing subroutines    ====================
+  ! ====================================================================
 
-!> remeshing with an order 2 method, corrected to allow large CFL number - untagged particles
-!!    @param[in]        ind_group   = coordinate of the current group of lines
-!!    @param[in]        p_pos_adim  = adimensionned  particles position
-!!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
-!!    @param[in]        bl_tag      = contains information about bloc (is it tagged ?)
-!!    @param[in]        i,k         = indice of of the current line (x-coordinate and z-coordinate)
-!!    @param[in,out]    scal        = scalar field to advect
-subroutine Yremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,i,k,scal)
+  !> remeshing with an order 2 method, corrected to allow large CFL number - untagged particles
+  !!    @param[in]        ind_group   = coordinate of the current group of lines
+  !!    @param[in]        p_pos_adim  = adimensionned  particles position
+  !!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
+  !!    @param[in]        bl_tag      = contains information about bloc (is it tagged ?)
+  !!    @param[in]        i,k         = indice of of the current line (x-coordinate and z-coordinate)
+  !!    @param[in,out]    scal        = scalar field to advect
+  subroutine Yremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,i,k,scal)
 
     ! Input/Output
     integer, dimension(2), intent(in)                                   :: ind_group
@@ -333,28 +333,28 @@ subroutine Yremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,i,k,scal)
     real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal
     ! Other local variables 
     real(WP),dimension(:),allocatable   :: send_buffer  ! buffer use to remesh the scalar before to send it 
-                                                        ! to the right subdomain
+    ! to the right subdomain
     integer, dimension(2)               :: rece_proc    ! minimal and maximal gap between my Y-coordinate and the one from which 
-                                                        ! I will receive data
+    ! I will receive data
     integer                             :: proc_min     ! smaller gap between me and the processes to where I send data
     integer                             :: proc_max     ! smaller gap between me and the processes to where I send data
 
     !  -- Compute ranges for remeshing of local particles --
     if (bl_type(1)) then
-        ! First particle is a centered one
-        send_j_min = nint(p_pos_adim(1))-1
+       ! First particle is a centered one
+       send_j_min = nint(p_pos_adim(1))-1
     else
-        ! First particle is a left one
-        send_j_min = floor(p_pos_adim(1))-1
+       ! First particle is a left one
+       send_j_min = floor(p_pos_adim(1))-1
     end if
     if (bl_type(N_proc(direction)/bl_size +1)) then
-        ! Last particle is a centered one
-        send_j_max = nint(p_pos_adim(N_proc(direction)))+1
+       ! Last particle is a centered one
+       send_j_max = nint(p_pos_adim(N_proc(direction)))+1
     else
-        ! Last particle is a left one
-        send_j_max = floor(p_pos_adim(N_proc(direction)))+1
+       ! Last particle is a left one
+       send_j_max = floor(p_pos_adim(N_proc(direction)))+1
     end if
-        
+
     ! -- Determine the communication needed : who will communicate whit who ? (ie compute sender and recer) --
     call AC_obtain_senders_line(send_j_min, send_j_max, direction, ind_group, proc_min, proc_max, rece_proc)
 
@@ -364,7 +364,7 @@ subroutine Yremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,i,k,scal)
 
     ! -- Remesh the particles in the buffer --
     call AC_remesh_lambda2corrected(direction, p_pos_adim, scal(i,:,k), bl_type, bl_tag, send_j_min, send_j_max, send_buffer)
-    
+
     ! -- Send the buffer to the matching processus and update the scalar field --
     scal(i,:,k) = 0
     call AC_bufferToScalar(direction, ind_group , send_j_min, send_j_max, proc_min, proc_max, rece_proc, send_buffer, scal(i,:,k))
@@ -372,18 +372,18 @@ subroutine Yremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,i,k,scal)
     ! -- Deallocate all field --
     deallocate(send_buffer)
 
-end subroutine Yremesh_O2
+  end subroutine Yremesh_O2
 
 
-!> remeshing with an order 2 method, corrected to allow large CFL number - group version
-!!    @param[in]        ind_group   = coordinate of the current group of lines
-!!    @param[in]        gs          = size of groups (along X direction)
-!!    @param[in]        p_pos_adim  = adimensionned  particles position
-!!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
-!!    @param[in]        bl_tag      = contains information about bloc (is it tagged ?)
-!!    @param[in]        i,k         = indice of of the current line (y-coordinate and z-coordinate)
-!!    @param[in,out]    scal        = scalar field to advect
-subroutine Yremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag,i,k,scal)
+  !> remeshing with an order 2 method, corrected to allow large CFL number - group version
+  !!    @param[in]        ind_group   = coordinate of the current group of lines
+  !!    @param[in]        gs          = size of groups (along X direction)
+  !!    @param[in]        p_pos_adim  = adimensionned  particles position
+  !!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
+  !!    @param[in]        bl_tag      = contains information about bloc (is it tagged ?)
+  !!    @param[in]        i,k         = indice of of the current line (y-coordinate and z-coordinate)
+  !!    @param[in,out]    scal        = scalar field to advect
+  subroutine Yremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag,i,k,scal)
 
     ! Input/Output
     integer, dimension(2), intent(in)                                   :: ind_group
@@ -397,67 +397,67 @@ subroutine Yremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag,i,k,scal)
     ! Variable used to remesh particles in a buffer
     real(WP),dimension(:),allocatable   :: send_buffer  ! buffer use to remesh the scalar before to send it to the right subdomain
     integer, dimension(2,gs(1),gs(2))   :: rece_proc    ! minimal and maximal gap between my Y-coordinate and the one from which 
-                                                        ! I will receive data
+    ! I will receive data
     integer, dimension(gs(1),gs(2))     :: proc_min     ! smaller gap between me and the processes to where I send data
     integer, dimension(gs(1),gs(2))     :: proc_max     ! smaller gap between me and the processes to where I send data
 
     integer                             :: i1, i2       ! indice of a line into the group
-    
+
     !  -- Compute ranges --
     where (bl_type(1,:,:))
-        ! First particle is a centered one
-        send_group_min = nint(p_pos_adim(1,:,:))-1
+       ! First particle is a centered one
+       send_group_min = nint(p_pos_adim(1,:,:))-1
     elsewhere
-        ! First particle is a left one
-        send_group_min = floor(p_pos_adim(1,:,:))-1
+       ! First particle is a left one
+       send_group_min = floor(p_pos_adim(1,:,:))-1
     end where
     where (bl_type(N_proc(direction)/bl_size +1,:,:))
-        ! Last particle is a centered one
-        send_group_max = nint(p_pos_adim(N_proc(direction),:,:))+1
+       ! Last particle is a centered one
+       send_group_max = nint(p_pos_adim(N_proc(direction),:,:))+1
     elsewhere
-        ! Last particle is a left one
-        send_group_max = floor(p_pos_adim(N_proc(direction),:,:))+1
+       ! Last particle is a left one
+       send_group_max = floor(p_pos_adim(N_proc(direction),:,:))+1
     end where
 
     ! -- Determine the communication needed : who will communicate whit who ? (ie compute sender and recer) --
     call AC_obtain_senders(direction, gs, ind_group, proc_min, proc_max, rece_proc)
 
     do i2 = 1, gs(2)
-        do i1 = 1, gs(1)
-            send_j_min = send_group_min(i1,i2)
-            send_j_max = send_group_max(i1,i2)
-
-            ! -- Allocate buffer for remeshing of local particles --
-            allocate(send_buffer(send_j_min:send_j_max))
-            send_buffer = 0.0;
-
-            ! -- Remesh the particles in the buffer --
-            call AC_remesh_lambda2corrected(direction, p_pos_adim(:,i1,i2), scal(i+i1-1,:,k+i2-1), &
-                & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, send_j_max, send_buffer)
-            
-            ! -- Send the buffer to the matching processus and update the scalar field --
-            scal(i+i1-1,:,k+i2-1) = 0
-            call AC_bufferToScalar(direction, ind_group , send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
-                & rece_proc(:,i1,i2), send_buffer, scal(i+i1-1,:,k+i2-1))
-
-            ! Deallocate all field
-            deallocate(send_buffer)
-
-        end do
+       do i1 = 1, gs(1)
+          send_j_min = send_group_min(i1,i2)
+          send_j_max = send_group_max(i1,i2)
+
+          ! -- Allocate buffer for remeshing of local particles --
+          allocate(send_buffer(send_j_min:send_j_max))
+          send_buffer = 0.0;
+
+          ! -- Remesh the particles in the buffer --
+          call AC_remesh_lambda2corrected(direction, p_pos_adim(:,i1,i2), scal(i+i1-1,:,k+i2-1), &
+               & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, send_j_max, send_buffer)
+
+          ! -- Send the buffer to the matching processus and update the scalar field --
+          scal(i+i1-1,:,k+i2-1) = 0
+          call AC_bufferToScalar(direction, ind_group , send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+               & rece_proc(:,i1,i2), send_buffer, scal(i+i1-1,:,k+i2-1))
+
+          ! Deallocate all field
+          deallocate(send_buffer)
+
+       end do
     end do
 
-end subroutine Yremesh_O2_group
+  end subroutine Yremesh_O2_group
 
 
-!> remeshing with an order 4 method, corrected to allow large CFL number - untagged particles
-!!    @param[in]        ind_group   = coordinate of the current group of lines
-!!    @param[in]        gs          = size of groups (along X direction)
-!!    @param[in]        p_pos_adim  = adimensionned particles position
-!!    @param[in]        bl_tag      = contains information about block (is it tagged ?)
-!!    @param[in]        i,k         = indice of of the current line (x-coordinate and z-coordinate)
-!!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
-!!    @param[in,out]    scal        = scalar field to advect
-subroutine Yremesh_O4(ind_group, gs, p_pos_adim, bl_type, bl_tag,i,k,scal)
+  !> remeshing with an order 4 method, corrected to allow large CFL number - untagged particles
+  !!    @param[in]        ind_group   = coordinate of the current group of lines
+  !!    @param[in]        gs          = size of groups (along X direction)
+  !!    @param[in]        p_pos_adim  = adimensionned particles position
+  !!    @param[in]        bl_tag      = contains information about block (is it tagged ?)
+  !!    @param[in]        i,k         = indice of of the current line (x-coordinate and z-coordinate)
+  !!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
+  !!    @param[in,out]    scal        = scalar field to advect
+  subroutine Yremesh_O4(ind_group, gs, p_pos_adim, bl_type, bl_tag,i,k,scal)
 
     ! input/output
     integer, dimension(2), intent(in)                                   :: ind_group
@@ -468,69 +468,69 @@ subroutine Yremesh_O4(ind_group, gs, p_pos_adim, bl_type, bl_tag,i,k,scal)
     logical, dimension(bl_nb(direction),gs(1),gs(2)), intent(in)        :: bl_tag       ! indice of tagged particles
     real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal
     ! Other local variables 
-        ! Variables used to remesh particles ...
-        ! ... and to communicate between subdomains. A variable prefixed by "send_"(resp "rece")
-        ! designes something I send (resp. I receive).
+    ! Variables used to remesh particles ...
+    ! ... and to communicate between subdomains. A variable prefixed by "send_"(resp "rece")
+    ! designes something I send (resp. I receive).
     real(WP),dimension(:),allocatable   :: send_buffer  ! buffer use to remesh the scalar before to send it to the right subdomain
     integer, dimension(2,gs(1),gs(2))   :: rece_proc    ! minimal and maximal gap between my Y-coordinate and the one from which 
-                                                        ! I will receive data
+    ! I will receive data
     integer, dimension(gs(1),gs(2))     :: proc_min     ! smaller gap between me and the processes to where I send data
     integer, dimension(gs(1),gs(2))     :: proc_max     ! smaller gap between me and the processes to where I send data
     integer                             :: i1, i2       ! indice of a line into the group
 
     !  -- Compute ranges --
     where (bl_type(1,:,:))
-        ! First particle is a centered one
-        send_group_min = nint(p_pos_adim(1,:,:))-2
+       ! First particle is a centered one
+       send_group_min = nint(p_pos_adim(1,:,:))-2
     elsewhere
-        ! First particle is a left one
-        send_group_min = floor(p_pos_adim(1,:,:))-2
+       ! First particle is a left one
+       send_group_min = floor(p_pos_adim(1,:,:))-2
     end where
     where (bl_type(N_proc(direction)/bl_size +1,:,:))
-        ! Last particle is a centered one
-        send_group_max = nint(p_pos_adim(N_proc(direction),:,:))+2
+       ! Last particle is a centered one
+       send_group_max = nint(p_pos_adim(N_proc(direction),:,:))+2
     elsewhere
-        ! Last particle is a left one
-        send_group_max = floor(p_pos_adim(N_proc(direction),:,:))+2
+       ! Last particle is a left one
+       send_group_max = floor(p_pos_adim(N_proc(direction),:,:))+2
     end where
 
     ! -- Determine the communication needed : who will communicate whit who ? (ie compute sender and recer) --
     call AC_obtain_senders(direction, gs, ind_group, proc_min, proc_max, rece_proc)
 
     do i2 = 1, gs(2)
-        do i1 = 1, gs(1)
-            send_j_min = send_group_min(i1,i2)
-            send_j_max = send_group_max(i1,i2)
-
-            ! -- Allocate buffer for remeshing of local particles --
-            allocate(send_buffer(send_j_min:send_j_max))
-            send_buffer = 0.0;
-        
-            ! -- Remesh the particles in the buffer --
-            call AC_remesh_lambda4corrected(direction, p_pos_adim(:,i1,i2), scal(i+i1-1,:,k+i2-1), &
-                & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, send_j_max, send_buffer)
-    
-            ! -- Send the buffer to the matching processus and update the scalar field --
-            scal(i+i1-1,:,k+i2-1) = 0
-            call AC_bufferToScalar(direction, ind_group, send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
-                & rece_proc(:,i1,i2), send_buffer, scal(i+i1-1,:,k+i2-1))
-
-            ! Deallocate all field
-            deallocate(send_buffer)
-
-        end do
+       do i1 = 1, gs(1)
+          send_j_min = send_group_min(i1,i2)
+          send_j_max = send_group_max(i1,i2)
+
+          ! -- Allocate buffer for remeshing of local particles --
+          allocate(send_buffer(send_j_min:send_j_max))
+          send_buffer = 0.0;
+
+          ! -- Remesh the particles in the buffer --
+          call AC_remesh_lambda4corrected(direction, p_pos_adim(:,i1,i2), scal(i+i1-1,:,k+i2-1), &
+               & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, send_j_max, send_buffer)
+
+          ! -- Send the buffer to the matching processus and update the scalar field --
+          scal(i+i1-1,:,k+i2-1) = 0
+          call AC_bufferToScalar(direction, ind_group, send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+               & rece_proc(:,i1,i2), send_buffer, scal(i+i1-1,:,k+i2-1))
+
+          ! Deallocate all field
+          deallocate(send_buffer)
+
+       end do
     end do
 
-end subroutine Yremesh_O4
+  end subroutine Yremesh_O4
 
 
-!> remeshing with M'6 formula - No tag neither correction for large time steps.
-!!    @param[in]        ind_group   = coordinate of the current group of lines
-!!    @param[in]        gs          = size of groups (along X direction)
-!!    @param[in]        p_pos_adim  = adimensionned particles position
-!!    @param[in]        i,k         = indice of of the current line (x-coordinate and z-coordinate)
-!!    @param[in,out]    scal        = scalar field to advect
-subroutine Yremesh_Mprime6(ind_group, gs, p_pos_adim, i,k,scal)
+  !> remeshing with M'6 formula - No tag neither correction for large time steps.
+  !!    @param[in]        ind_group   = coordinate of the current group of lines
+  !!    @param[in]        gs          = size of groups (along X direction)
+  !!    @param[in]        p_pos_adim  = adimensionned particles position
+  !!    @param[in]        i,k         = indice of of the current line (x-coordinate and z-coordinate)
+  !!    @param[in,out]    scal        = scalar field to advect
+  subroutine Yremesh_Mprime6(ind_group, gs, p_pos_adim, i,k,scal)
 
     ! input/output
     integer, dimension(2), intent(in)                                   :: ind_group
@@ -539,12 +539,12 @@ subroutine Yremesh_Mprime6(ind_group, gs, p_pos_adim, i,k,scal)
     real(WP), dimension(N_proc(direction),gs(1),gs(2)), intent(in)      :: p_pos_adim   ! adimensionned particles position 
     real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal
     ! Other local variables 
-        ! Variables used to remesh particles ...
-        ! ... and to communicate between subdomains. A variable prefixed by "send_"(resp "rece")
-        ! designes something I send (resp. I receive).
+    ! Variables used to remesh particles ...
+    ! ... and to communicate between subdomains. A variable prefixed by "send_"(resp "rece")
+    ! designes something I send (resp. I receive).
     real(WP),dimension(:),allocatable   :: send_buffer  ! buffer use to remesh the scalar before to send it to the right subdomain
     integer, dimension(2,gs(1),gs(2))   :: rece_proc    ! minimal and maximal gap between my Y-coordinate and the one from which 
-                                                        ! I will receive data
+    ! I will receive data
     integer, dimension(gs(1),gs(2))     :: proc_min     ! smaller gap between me and the processes to where I send data
     integer, dimension(gs(1),gs(2))     :: proc_max     ! smaller gap between me and the processes to where I send data
     integer                             :: i1, i2       ! indice of a line into the group
@@ -558,44 +558,44 @@ subroutine Yremesh_Mprime6(ind_group, gs, p_pos_adim, i,k,scal)
     call AC_obtain_senders(direction, gs, ind_group, proc_min, proc_max, rece_proc, 1)
 
     do i2 = 1, gs(2)
-        do i1 = 1, gs(1)
-            send_j_min = send_group_min(i1,i2)
-            send_j_max = send_group_max(i1,i2)
+       do i1 = 1, gs(1)
+          send_j_min = send_group_min(i1,i2)
+          send_j_max = send_group_max(i1,i2)
 
-            !  -- Allocate and initialize the buffer --
-            allocate(send_buffer(send_j_min:send_j_max))
-            send_buffer = 0.0;
+          !  -- Allocate and initialize the buffer --
+          allocate(send_buffer(send_j_min:send_j_max))
+          send_buffer = 0.0;
 
-            ! -- Remesh the particles in the buffer --
-            do ind_p = 1, N_proc(direction), 1
-                call AC_remesh_Mprime6(p_pos_adim(ind_p,i1,i2),scal(i+i1-1,ind_p,k+i2-1), send_buffer)
-            end do
+          ! -- Remesh the particles in the buffer --
+          do ind_p = 1, N_proc(direction), 1
+             call AC_remesh_Mprime6(p_pos_adim(ind_p,i1,i2),scal(i+i1-1,ind_p,k+i2-1), send_buffer)
+          end do
 
-            ! -- Send the buffer to the matching processus and update the scalar field --
-            scal(i+i1-1,:,k+i2-1) = 0
-            call AC_bufferToScalar(direction, ind_group, send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
-                & rece_proc(:,i1,i2), send_buffer, scal(i+i1-1,:,k+i2-1))
+          ! -- Send the buffer to the matching processus and update the scalar field --
+          scal(i+i1-1,:,k+i2-1) = 0
+          call AC_bufferToScalar(direction, ind_group, send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+               & rece_proc(:,i1,i2), send_buffer, scal(i+i1-1,:,k+i2-1))
 
-            ! Deallocate all field
-            deallocate(send_buffer)
+          ! Deallocate all field
+          deallocate(send_buffer)
 
-        end do
+       end do
     end do
 
-end subroutine Yremesh_Mprime6
+  end subroutine Yremesh_Mprime6
 
 
-! ====================================================================
-! ====================    Initialize particle     ====================
-! ====================================================================
+  ! ====================================================================
+  ! ====================    Initialize particle     ====================
+  ! ====================================================================
 
-!> Creation and initialisation of a particle line (ie X and Z coordinate are fixed)
-!!    @param[in]    Vy          = 3D velocity field
-!!    @param[in]    i           = X-indice of the current line
-!!    @param[in]    k           = Z-indice of the current line
-!!    @param[out]   p_pos_adim  = adimensioned particles postion
-!!    @param[out]   p_V         = particle velocity 
-subroutine advecY_init_line(Vy, i, k, p_pos_adim, p_V)
+  !> Creation and initialisation of a particle line (ie X and Z coordinate are fixed)
+  !!    @param[in]    Vy          = 3D velocity field
+  !!    @param[in]    i           = X-indice of the current line
+  !!    @param[in]    k           = Z-indice of the current line
+  !!    @param[out]   p_pos_adim  = adimensioned particles postion
+  !!    @param[out]   p_V         = particle velocity 
+  subroutine advecY_init_line(Vy, i, k, p_pos_adim, p_V)
 
     ! input/output
     integer, intent(in)                                 :: i,k
@@ -605,21 +605,21 @@ subroutine advecY_init_line(Vy, i, k, p_pos_adim, p_V)
     integer                                     :: ind          ! indice
 
     do ind = 1, N_proc(direction)
-        p_pos_adim(ind) = ind
-        p_V(ind)        = Vy(i,ind,k)
+       p_pos_adim(ind) = ind
+       p_V(ind)        = Vy(i,ind,k)
     end do
 
-end subroutine advecY_init_line
+  end subroutine advecY_init_line
 
 
-!> Creation and initialisation of a group of particle line
-!!    @param[in]    Vy          = 3D velocity field
-!!    @param[in]    i           = X-indice of the current line
-!!    @param[in]    k           = Z-indice of the current line
-!!    @param[in]    Gsize       = size of groups (along Y direction)
-!!    @param[out]   p_pos_adim  = adimensioned particles postion
-!!    @param[out]   p_V         = particle velocity 
-subroutine advecY_init_group(Vy, i, k, Gsize, p_pos_adim, p_V)
+  !> Creation and initialisation of a group of particle line
+  !!    @param[in]    Vy          = 3D velocity field
+  !!    @param[in]    i           = X-indice of the current line
+  !!    @param[in]    k           = Z-indice of the current line
+  !!    @param[in]    Gsize       = size of groups (along Y direction)
+  !!    @param[out]   p_pos_adim  = adimensioned particles postion
+  !!    @param[out]   p_V         = particle velocity 
+  subroutine advecY_init_group(Vy, i, k, Gsize, p_pos_adim, p_V)
 
     ! input/output
     integer, intent(in)                                                     :: i,k
@@ -631,15 +631,15 @@ subroutine advecY_init_group(Vy, i, k, Gsize, p_pos_adim, p_V)
     integer                                     :: i_gp, k_gp   ! Y and Z indice of the current line in the group
 
     do k_gp = 1, Gsize(2)
-        do i_gp = 1, Gsize(1)
-            do ind = 1, N_proc(direction)
-                p_pos_adim(ind, i_gp, k_gp) = ind
-                p_V(ind, i_gp, k_gp)        = Vy(i+i_gp-1,ind,k+k_gp-1)
-            end do
-        end do
+       do i_gp = 1, Gsize(1)
+          do ind = 1, N_proc(direction)
+             p_pos_adim(ind, i_gp, k_gp) = ind
+             p_V(ind, i_gp, k_gp)        = Vy(i+i_gp-1,ind,k+k_gp-1)
+          end do
+       end do
     end do
 
-end subroutine advecY_init_group
+  end subroutine advecY_init_group
 
 end module advecY
 !> @}
diff --git a/HySoP/src/Unstable/LEGI/src/particles/advecZ.f90 b/HySoP/src/Unstable/LEGI/src/particles/advecZ.f90
index b0ba5b6a9..8bbf5f4a9 100644
--- a/HySoP/src/Unstable/LEGI/src/particles/advecZ.f90
+++ b/HySoP/src/Unstable/LEGI/src/particles/advecZ.f90
@@ -30,66 +30,66 @@
 
 module advecZ
 
-    use precision
-    use advec_common
-
-    implicit none
-
-    ! ===== Private variables =====
-    ! Minimal and maximal indice of the buffer used in the different communication
-    !> minimal indice of the send buffer
-    !integer, public         :: send_j_min
-    !> maximal indice of the send buffer
-    !integer, public         :: send_j_max
-
-    ! ===== Public procedures =====
-    !> Generique procedure to advect the scalar with a particles solver
-    public                  :: advecZ_calc
-    !----- (corrected) Remeshing method (these methods are set to public in validation purposes) -----
-    public                  :: Zremesh_O2       ! order 2
-    public                  :: Zremesh_O4       ! order 4
-    ! ----- Other remeshing formula -----
-    public                  :: Zremesh_Mprime6  
-
-    ! ===== Private porcedures =====
-    ! particles solver with different remeshing formula
-    private                 :: advecZ_calc_O2       ! remeshing method at order 2
-    private                 :: advecZ_calc_O2_group ! remeshing method at order 2
-    private                 :: advecZ_calc_Mprime6  ! M'6 remeshing method
-    ! Particles initialisation
-    private                 :: advecZ_init      ! generic initialization of particle position, velocity.
-    private                 :: advecZ_init_line ! initialisation for only one line of particles
-    private                 :: advecZ_init_group! initialisation for a group of line of particles
-
-    ! ===== Private variable ====
-    !> Size of group of particles line along Z
-    integer, dimension(2), private  :: gpZ_size     
-    !> Current direction = 3 ie along Z
-    integer, parameter, private     :: direction = 3
-
-    interface advecZ_init
-        module procedure advecZ_init_line, advecZ_init_group
-    end interface advecZ_init
+  use precision
+  use advec_remeshing_formula
+
+  implicit none
+
+  ! ===== Private variables =====
+  ! Minimal and maximal indice of the buffer used in the different communication
+  !> minimal indice of the send buffer
+  !integer, public         :: send_j_min
+  !> maximal indice of the send buffer
+  !integer, public         :: send_j_max
+
+  ! ===== Public procedures =====
+  !> Generic procedure to advect the scalar with a particle solver
+  public                  :: advecZ_calc
+  !----- (corrected) Remeshing method (these methods are set to public in validation purposes) -----
+  public                  :: Zremesh_O2       ! order 2
+  public                  :: Zremesh_O4       ! order 4
+  ! ----- Other remeshing formula -----
+  public                  :: Zremesh_Mprime6  
+
+  ! ===== Private procedures =====
+  ! particles solver with different remeshing formula
+  private                 :: advecZ_calc_O2       ! remeshing method at order 2
+  private                 :: advecZ_calc_O2_group ! remeshing method at order 2
+  private                 :: advecZ_calc_Mprime6  ! M'6 remeshing method
+  ! Particles initialisation
+  private                 :: advecZ_init      ! generic initialization of particle position, velocity.
+  private                 :: advecZ_init_line ! initialisation for only one line of particles
+  private                 :: advecZ_init_group! initialisation for a group of line of particles
+
+  ! ===== Private variables =====
+  !> Size of group of particles line along Z
+  integer, dimension(2), private  :: gpZ_size     
+  !> Current direction = 3 ie along Z
+  integer, parameter, private     :: direction = 3
+
+  interface advecZ_init
+     module procedure advecZ_init_line, advecZ_init_group
+  end interface advecZ_init
 
 contains
 
-! #####################################################################################
-! #####                                                                           #####
-! #####                         Public procedure                                  #####
-! #####                                                                           #####
-! #####################################################################################
-
-! ====================================================================
-! ====================    Generic solveur         ====================
-! ====================================================================
-
-!> Scalar advection (this procedure call the right solver, depending on the simulation setup) 
-!!    @param[in]        dt          = time step
-!!    @param[in]        Vz          = velocity along y (could be discretised on a bigger mesh then the scalar)
-!!    @param[in,out]    SC          = scalar field to advect
-!!    @param[in]        type_solver = scheme use for the advection (particle with order 2 or 4)
-subroutine advecZ_calc(dt, Vz, SC, type_solver)
-  
+  ! #####################################################################################
+  ! #####                                                                           #####
+  ! #####                         Public procedure                                  #####
+  ! #####                                                                           #####
+  ! #####################################################################################
+
+  ! ====================================================================
+  ! ====================    Generic solveur         ====================
+  ! ====================================================================
+
+  !> Scalar advection (this procedure calls the right solver, depending on the simulation setup)
+  !!    @param[in]        dt          = time step
+  !!    @param[in]        Vz          = velocity along z (could be discretised on a bigger mesh than the scalar)
+  !!    @param[in,out]    SC          = scalar field to advect
+  !!    @param[in]        type_solver = scheme use for the advection (particle with order 2 or 4)
+  subroutine advecZ_calc(dt, Vz, SC, type_solver)
+
     ! Input/Output
     real(WP), intent(in)                                                :: dt
     real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in)    :: Vz
@@ -104,37 +104,37 @@ subroutine advecZ_calc(dt, Vz, SC, type_solver)
 
     ! Call the right solver depending on the space order we want to.
     select case(type_solver)
-        case('p_O2')
-            call advecZ_calc_O2_group(dt, Vz, SC,group_size(direction,:))
-            !call advecZ_calc_O2(dt, Vz, SC)
-        case('p_O4')
-            call advecZ_calc_O4(dt, Vz, SC,group_size(direction,:))
-        case('p_M6')
-            call advecZ_calc_Mprime6(dt, Vz, SC,group_size(direction,:))
-        case default
-            call advecZ_calc_O2_group(dt, Vz, SC,group_size(direction,:))
-            !call advecZ_calc_O2(dt, Vz, SC)
+    case('p_O2')
+       call advecZ_calc_O2_group(dt, Vz, SC,group_size(direction,:))
+       !call advecZ_calc_O2(dt, Vz, SC)
+    case('p_O4')
+       call advecZ_calc_O4(dt, Vz, SC,group_size(direction,:))
+    case('p_M6')
+       call advecZ_calc_Mprime6(dt, Vz, SC,group_size(direction,:))
+    case default
+       call advecZ_calc_O2_group(dt, Vz, SC,group_size(direction,:))
+       !call advecZ_calc_O2(dt, Vz, SC)
     end select
 
-end subroutine advecZ_calc
+  end subroutine advecZ_calc
 
 
-! #####################################################################################
-! #####                                                                           #####
-! #####                         Private procedure                                 #####
-! #####                                                                           #####
-! #####################################################################################
+  ! #####################################################################################
+  ! #####                                                                           #####
+  ! #####                         Private procedure                                 #####
+  ! #####                                                                           #####
+  ! #####################################################################################
 
-! ====================================================================
-! ====================    Different solvers       ====================
-! ====================================================================
+  ! ====================================================================
+  ! ====================    Different solvers       ====================
+  ! ====================================================================
 
 
-!> Advection during a time step dt - order 2
-!!    @param[in]        dt      = time step
-!!    @param[in]        Vz      = velocity along y (could be discretised on a bigger mesh then the scalar)
-!!    @param[in,out]    scal3D   = scalar field to advect
-subroutine advecZ_calc_O2(dt,Vz,scal3D)
+  !> Advection during a time step dt - order 2
+  !!    @param[in]        dt      = time step
+  !!    @param[in]        Vz      = velocity along z (could be discretised on a bigger mesh than the scalar)
+  !!    @param[in,out]    scal3D   = scalar field to advect
+  subroutine advecZ_calc_O2(dt,Vz,scal3D)
 
     ! Input/Output
     real(WP), intent(in)                                                :: dt
@@ -150,38 +150,38 @@ subroutine advecZ_calc_O2(dt,Vz,scal3D)
 
     ind_group = 0
     do j = 1, N_proc(2)
-        ind_group(2) = ind_group(2) + 1
-        ind_group(1) = 0
-        do i = 1, N_proc(1)
-            ind_group(1) = ind_group(1) + 1
-
-            ! ===== Init particles =====
-            call advecZ_init(Vz, i, j, p_pos_adim, p_V)
-
-            ! ===== Advection =====
-            ! -- Compute velocity (with a RK2 scheme) --
-            call AC_particle_velocity(dt, direction, ind_group, p_pos_adim, p_V)
-            ! -- Advec particles --
-            p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
-
-            ! ===== Remeshing =====
-            ! -- Pre-Remeshing: Determine blocks type and tag particles --
-            call AC_type_and_block(dt, direction, ind_group, p_V, bl_type, bl_tag)
-            ! -- Remeshing --
-            call Zremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,i,j,scal3D)
-
-        end do
+       ind_group(2) = ind_group(2) + 1
+       ind_group(1) = 0
+       do i = 1, N_proc(1)
+          ind_group(1) = ind_group(1) + 1
+
+          ! ===== Init particles =====
+          call advecZ_init(Vz, i, j, p_pos_adim, p_V)
+
+          ! ===== Advection =====
+          ! -- Compute velocity (with a RK2 scheme) --
+          call AC_particle_velocity(dt, direction, ind_group, p_pos_adim, p_V)
+          ! -- Advec particles --
+          p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+
+          ! ===== Remeshing =====
+          ! -- Pre-Remeshing: Determine blocks type and tag particles --
+          call AC_type_and_block(dt, direction, ind_group, p_V, bl_type, bl_tag)
+          ! -- Remeshing --
+          call Zremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,i,j,scal3D)
+
+       end do
     end do
 
-end subroutine advecZ_calc_O2
+  end subroutine advecZ_calc_O2
 
 
-!> Advection during a time step dt - order 2
-!!    @param[in]        dt      = time step
-!!    @param[in]        Vz      = velocity along y (could be discretised on a bigger mesh then the scalar)
-!!    @param[in,out]    scal3D  = scalar field to advect
-!!    @param[in]        gs      = size of groups (along Z direction)
-subroutine advecZ_calc_O2_group(dt,Vz,scal3D,gs)
+  !> Advection during a time step dt - order 2
+  !!    @param[in]        dt      = time step
+  !!    @param[in]        Vz      = velocity along z (could be discretised on a bigger mesh than the scalar)
+  !!    @param[in,out]    scal3D  = scalar field to advect
+  !!    @param[in]        gs      = size of groups (along Z direction)
+  subroutine advecZ_calc_O2_group(dt,Vz,scal3D,gs)
 
     ! Input/Output
     real(WP), intent(in)                                                :: dt
@@ -199,39 +199,39 @@ subroutine advecZ_calc_O2_group(dt,Vz,scal3D,gs)
     ind_group = 0
 
     do j = 1, N_proc(2), gs(2)
-        ind_group(2) = ind_group(2) + 1
-        ind_group(1) = 0
-        do i = 1, N_proc(1), gs(1)
-            ind_group(1) = ind_group(1) + 1
-
-            ! ===== Init particles =====
-            call advecZ_init_group(Vz, i, j, gs, p_pos_adim, p_V)
-
-            ! ===== Advection =====
-            ! -- Compute velocity (with a RK2 scheme) --
-            call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
-            ! -- Advec particles --
-            p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
-
-            ! ===== Remeshing =====
-            ! -- Pre-Remeshing: Determine blocks type and tag particles --
-            call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
-            ! -- Remeshing --
-            call Zremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag, i,j,scal3D)
-
-        end do
+       ind_group(2) = ind_group(2) + 1
+       ind_group(1) = 0
+       do i = 1, N_proc(1), gs(1)
+          ind_group(1) = ind_group(1) + 1
+
+          ! ===== Init particles =====
+          call advecZ_init_group(Vz, i, j, gs, p_pos_adim, p_V)
+
+          ! ===== Advection =====
+          ! -- Compute velocity (with a RK2 scheme) --
+          call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
+          ! -- Advec particles --
+          p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+
+          ! ===== Remeshing =====
+          ! -- Pre-Remeshing: Determine blocks type and tag particles --
+          call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
+          ! -- Remeshing --
+          call Zremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag, i,j,scal3D)
+
+       end do
     end do
 
 
-end subroutine advecZ_calc_O2_group
+  end subroutine advecZ_calc_O2_group
 
 
-!> Advection during a time step dt - order 4
-!!    @param[in]        dt      = time step
-!!    @param[in]        Vz      = velocity along y (could be discretised on a bigger mesh then the scalar)
-!!    @param[in,out]    scal3D  = scalar field to advect
-!!    @param[in]        gs      = size of groups (along Z direction)
-subroutine advecZ_calc_O4(dt,Vz,scal3D,gs)
+  !> Advection during a time step dt - order 4
+  !!    @param[in]        dt      = time step
+  !!    @param[in]        Vz      = velocity along z (could be discretised on a bigger mesh than the scalar)
+  !!    @param[in,out]    scal3D  = scalar field to advect
+  !!    @param[in]        gs      = size of groups (along Z direction)
+  subroutine advecZ_calc_O4(dt,Vz,scal3D,gs)
 
     ! Input/Output
     real(WP), intent(in)                                                :: dt
@@ -249,39 +249,39 @@ subroutine advecZ_calc_O4(dt,Vz,scal3D,gs)
     ind_group = 0
 
     do j = 1, N_proc(2), gs(2)
-        ind_group(2) = ind_group(2) + 1
-        ind_group(1) = 0
-        do i = 1, N_proc(1), gs(1)
-            ind_group(1) = ind_group(1) + 1
-
-            ! ===== Init particles =====
-            call advecZ_init_group(Vz, i, j, gs, p_pos_adim, p_V)
-
-            ! ===== Advection =====
-            ! -- Compute velocity (with a RK2 scheme) --
-            call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
-            ! -- Advec particles --
-            p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
-
-            ! ===== Remeshing =====
-            ! -- Pre-Remeshing: Determine blocks type and tag particles --
-            call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
-            ! -- Remeshing --
-            call Zremesh_O4(ind_group, gs, p_pos_adim, bl_type, bl_tag, i,j,scal3D)
-
-        end do
+       ind_group(2) = ind_group(2) + 1
+       ind_group(1) = 0
+       do i = 1, N_proc(1), gs(1)
+          ind_group(1) = ind_group(1) + 1
+
+          ! ===== Init particles =====
+          call advecZ_init_group(Vz, i, j, gs, p_pos_adim, p_V)
+
+          ! ===== Advection =====
+          ! -- Compute velocity (with a RK2 scheme) --
+          call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
+          ! -- Advec particles --
+          p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+
+          ! ===== Remeshing =====
+          ! -- Pre-Remeshing: Determine blocks type and tag particles --
+          call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
+          ! -- Remeshing --
+          call Zremesh_O4(ind_group, gs, p_pos_adim, bl_type, bl_tag, i,j,scal3D)
+
+       end do
     end do
 
 
-end subroutine advecZ_calc_O4
+  end subroutine advecZ_calc_O4
 
 
-!> Advection during a time step dt - M'6 remeshing formula - no tag, no type
-!!    @param[in]        dt      = time step
-!!    @param[in]        Vz      = velocity along y (could be discretised on a bigger mesh then the scalar)
-!!    @param[in,out]    scal3D  = scalar field to advect
-!!    @param[in]        gs      = size of groups (along Z direction)
-subroutine advecZ_calc_Mprime6(dt,Vz,scal3D,gs)
+  !> Advection during a time step dt - M'6 remeshing formula - no tag, no type
+  !!    @param[in]        dt      = time step
+  !!    @param[in]        Vz      = velocity along z (could be discretised on a bigger mesh than the scalar)
+  !!    @param[in,out]    scal3D  = scalar field to advect
+  !!    @param[in]        gs      = size of groups (along Z direction)
+  subroutine advecZ_calc_Mprime6(dt,Vz,scal3D,gs)
 
     ! Input/Output
     real(WP), intent(in)                                                :: dt
@@ -297,43 +297,43 @@ subroutine advecZ_calc_Mprime6(dt,Vz,scal3D,gs)
     ind_group = 0
 
     do j = 1, N_proc(2), gs(2)
-        ind_group(2) = ind_group(2) + 1
-        ind_group(1) = 0
-        do i = 1, N_proc(1), gs(1)
-            ind_group(1) = ind_group(1) + 1
+       ind_group(2) = ind_group(2) + 1
+       ind_group(1) = 0
+       do i = 1, N_proc(1), gs(1)
+          ind_group(1) = ind_group(1) + 1
 
-            ! ===== Init particles =====
-            call advecZ_init_group(Vz, i, j, gs, p_pos_adim, p_V)
+          ! ===== Init particles =====
+          call advecZ_init_group(Vz, i, j, gs, p_pos_adim, p_V)
 
-            ! ===== Advection =====
-            ! -- Compute velocity (with a RK2 scheme) --
-            call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
-            ! -- Advec particles --
-            p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
+          ! ===== Advection =====
+          ! -- Compute velocity (with a RK2 scheme) --
+          call AC_particle_velocity(dt, direction, gs, ind_group, p_pos_adim, p_V)
+          ! -- Advec particles --
+          p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
 
-            ! ===== Remeshing =====
-            ! -- Remeshing --
-            call Zremesh_Mprime6(ind_group, gs, p_pos_adim, i,j,scal3D)
+          ! ===== Remeshing =====
+          ! -- Remeshing --
+          call Zremesh_Mprime6(ind_group, gs, p_pos_adim, i,j,scal3D)
 
-        end do
+       end do
     end do
 
 
-end subroutine advecZ_calc_Mprime6
+  end subroutine advecZ_calc_Mprime6
 
 
-! ====================================================================
-! ====================   Remeshing subroutines    ====================
-! ====================================================================
+  ! ====================================================================
+  ! ====================   Remeshing subroutines    ====================
+  ! ====================================================================
 
-!> remeshing with an order 2 method, corrected to allow large CFL number - untagged particles
-!!    @param[in]        ind_group   = coordinate of the current group of lines
-!!    @param[in]        p_pos_adim  = adimensionned  particles position
-!!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
-!!    @param[in]        bl_tag      = contains information about bloc (is it tagged ?)
-!!    @param[in]        i,j         = indice of of the current line (x-coordinate and z-coordinate)
-!!    @param[in,out]    scal        = scalar field to advect
-subroutine Zremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,i,j,scal)
+  !> remeshing with an order 2 method, corrected to allow large CFL number - untagged particles
+  !!    @param[in]        ind_group   = coordinate of the current group of lines
+  !!    @param[in]        p_pos_adim  = adimensionned  particles position
+  !!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
+  !!    @param[in]        bl_tag      = contains information about bloc (is it tagged ?)
+  !!    @param[in]        i,j         = indice of the current line (x-coordinate and z-coordinate)
+  !!    @param[in,out]    scal        = scalar field to advect
+  subroutine Zremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,i,j,scal)
 
     ! Input/Output
     integer, dimension(2), intent(in)                                   :: ind_group
@@ -344,39 +344,39 @@ subroutine Zremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,i,j,scal)
     real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal
     ! Other local variables 
     real(WP),dimension(:),allocatable   :: send_buffer  ! buffer use to remesh the scalar before to send it
-                                                        ! to the right subdomain
+    ! to the right subdomain
     integer, dimension(2)               :: rece_proc    ! minimal and maximal gap between my Y-coordinate and the one from which 
-                                                        ! I will receive data
+    ! I will receive data
     integer                             :: proc_min     ! smaller gap between me and the processes to where I send data
     integer                             :: proc_max     ! smaller gap between me and the processes to where I send data
-    
+
 
     !  -- Compute ranges for remeshing of local particles --
     if (bl_type(1)) then
-        ! First particle is a centered one
-        send_j_min = nint(p_pos_adim(1))-1
+       ! First particle is a centered one
+       send_j_min = nint(p_pos_adim(1))-1
     else
-        ! First particle is a left one
-        send_j_min = floor(p_pos_adim(1))-1
+       ! First particle is a left one
+       send_j_min = floor(p_pos_adim(1))-1
     end if
     if (bl_type(N_proc(direction)/bl_size +1)) then
-        ! Last particle is a centered one
-        send_j_max = nint(p_pos_adim(N_proc(direction)))+1
+       ! Last particle is a centered one
+       send_j_max = nint(p_pos_adim(N_proc(direction)))+1
     else
-        ! Last particle is a left one
-        send_j_max = floor(p_pos_adim(N_proc(direction)))+1
+       ! Last particle is a left one
+       send_j_max = floor(p_pos_adim(N_proc(direction)))+1
     end if
-        
+
     ! -- Determine the communication needed : who will communicate whit who ? (ie compute sender and recer) --
     call AC_obtain_senders_line(send_j_min, send_j_max, direction, ind_group, proc_min, proc_max, rece_proc)
-        
+
     !  -- Allocate and initialize the buffer --
     allocate(send_buffer(send_j_min:send_j_max))
     send_buffer = 0.0;
 
     ! -- Remesh the particles in the buffer --
     call AC_remesh_lambda2corrected(direction, p_pos_adim, scal(i,j,:), bl_type, bl_tag, send_j_min, send_j_max, send_buffer)
-    
+
     ! -- Send the buffer to the matching processus and update the scalar field --
     scal(i,j,:) = 0
     call AC_bufferToScalar(direction, ind_group , send_j_min, send_j_max, proc_min, proc_max, rece_proc, send_buffer, scal(i,j,:))
@@ -384,18 +384,18 @@ subroutine Zremesh_O2(ind_group, p_pos_adim, bl_type, bl_tag,i,j,scal)
     ! Deallocate all field
     deallocate(send_buffer)
 
-end subroutine Zremesh_O2
+  end subroutine Zremesh_O2
 
 
-!> remeshing with an order 2 method, corrected to allow large CFL number - group version
-!!    @param[in]        ind_group   = coordinate of the current group of lines
-!!    @param[in]        gs          = size of groups (along X direction)
-!!    @param[in]        p_pos_adim  = adimensionned  particles position
-!!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
-!!    @param[in]        bl_tag      = contains information about bloc (is it tagged ?)
-!!    @param[in]        i,j         = indice of of the current line (x-coordinate and z-coordinate)
-!!    @param[in,out]    scal        = scalar field to advect
-subroutine Zremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag,i,j,scal)
+  !> remeshing with an order 2 method, corrected to allow large CFL number - group version
+  !!    @param[in]        ind_group   = coordinate of the current group of lines
+  !!    @param[in]        gs          = size of groups (along X direction)
+  !!    @param[in]        p_pos_adim  = adimensionned  particles position
+  !!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
+  !!    @param[in]        bl_tag      = contains information about bloc (is it tagged ?)
+  !!    @param[in]        i,j         = indice of the current line (x-coordinate and z-coordinate)
+  !!    @param[in,out]    scal        = scalar field to advect
+  subroutine Zremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag,i,j,scal)
 
     ! Input/Output
     integer, dimension(2), intent(in)                                   :: ind_group
@@ -409,67 +409,67 @@ subroutine Zremesh_O2_group(ind_group, gs, p_pos_adim, bl_type, bl_tag,i,j,scal)
     ! Variable used to remesh particles in a buffer
     real(WP),dimension(:),allocatable   :: send_buffer  ! buffer use to remesh the scalar before to send it to the right subdomain
     integer, dimension(2,gs(1),gs(2))   :: rece_proc    ! minimal and maximal gap between my Y-coordinate and the one from which 
-                                                        ! I will receive data
+    ! I will receive data
     integer, dimension(gs(1),gs(2))     :: proc_min     ! smaller gap between me and the processes to where I send data
     integer, dimension(gs(1),gs(2))     :: proc_max     ! smaller gap between me and the processes to where I send data
 
     integer                             :: i1, i2       ! indice of a line into the group
-    
+
     !  -- Compute ranges --
     where (bl_type(1,:,:))
-        ! First particle is a centered one
-        send_group_min = nint(p_pos_adim(1,:,:))-1
+       ! First particle is a centered one
+       send_group_min = nint(p_pos_adim(1,:,:))-1
     elsewhere
-        ! First particle is a left one
-        send_group_min = floor(p_pos_adim(1,:,:))-1
+       ! First particle is a left one
+       send_group_min = floor(p_pos_adim(1,:,:))-1
     end where
     where (bl_type(N_proc(direction)/bl_size +1,:,:))
-        ! Last particle is a centered one
-        send_group_max = nint(p_pos_adim(N_proc(direction),:,:))+1
+       ! Last particle is a centered one
+       send_group_max = nint(p_pos_adim(N_proc(direction),:,:))+1
     elsewhere
-        ! Last particle is a left one
-        send_group_max = floor(p_pos_adim(N_proc(direction),:,:))+1
+       ! Last particle is a left one
+       send_group_max = floor(p_pos_adim(N_proc(direction),:,:))+1
     end where
 
     ! -- Determine the communication needed : who will communicate whit who ? (ie compute sender and recer) --
     call AC_obtain_senders(direction, gs, ind_group, proc_min, proc_max, rece_proc)
 
     do i2 = 1, gs(2)
-        do i1 = 1, gs(1)
-            send_j_min = send_group_min(i1,i2)
-            send_j_max = send_group_max(i1,i2)
-
-            ! -- Allocate buffer for remeshing of local particles --
-            allocate(send_buffer(send_j_min:send_j_max))
-            send_buffer = 0.0;
-
-            ! -- Remesh the particles in the buffer --
-            call AC_remesh_lambda2corrected(direction, p_pos_adim(:,i1,i2), scal(i+i1-1,j+i2-1,:), &
-                & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, send_j_max, send_buffer)
-            
-            ! -- Send the buffer to the matching processus and update the scalar field --
-            scal(i+i1-1,j+i2-1,:) = 0
-            call AC_bufferToScalar(direction, ind_group , send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
-                & rece_proc(:,i1,i2), send_buffer, scal(i+i1-1,j+i2-1,:))
-
-            ! Deallocate all field
-            deallocate(send_buffer)
-
-        end do
+       do i1 = 1, gs(1)
+          send_j_min = send_group_min(i1,i2)
+          send_j_max = send_group_max(i1,i2)
+
+          ! -- Allocate buffer for remeshing of local particles --
+          allocate(send_buffer(send_j_min:send_j_max))
+          send_buffer = 0.0;
+
+          ! -- Remesh the particles in the buffer --
+          call AC_remesh_lambda2corrected(direction, p_pos_adim(:,i1,i2), scal(i+i1-1,j+i2-1,:), &
+               & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, send_j_max, send_buffer)
+
+          ! -- Send the buffer to the matching processus and update the scalar field --
+          scal(i+i1-1,j+i2-1,:) = 0
+          call AC_bufferToScalar(direction, ind_group , send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+               & rece_proc(:,i1,i2), send_buffer, scal(i+i1-1,j+i2-1,:))
+
+          ! Deallocate all field
+          deallocate(send_buffer)
+
+       end do
     end do
 
-end subroutine Zremesh_O2_group
+  end subroutine Zremesh_O2_group
 
 
-!> remeshing with an order 4 method, corrected to allow large CFL number - untagged particles
-!!    @param[in]        ind_group   = coordinate of the current group of lines
-!!    @param[in]        gs          = size of groups (along X direction)
-!!    @param[in]        p_pos_adim  = adimensionned particles position
-!!    @param[in]        bl_tag      = contains information about block (is it tagged ?)
-!!    @param[in]        i,j         = indice of of the current line (x-coordinate and z-coordinate)
-!!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
-!!    @param[in,out]    scal          = scalar field to advect
-subroutine Zremesh_O4(ind_group, gs, p_pos_adim, bl_type, bl_tag,i,j,scal)
+  !> remeshing with an order 4 method, corrected to allow large CFL number - untagged particles
+  !!    @param[in]        ind_group   = coordinate of the current group of lines
+  !!    @param[in]        gs          = size of groups (along X direction)
+  !!    @param[in]        p_pos_adim  = adimensionned particles position
+  !!    @param[in]        bl_tag      = contains information about block (is it tagged ?)
+  !!    @param[in]        i,j         = indice of the current line (x-coordinate and z-coordinate)
+  !!    @param[in]        bl_type     = equal 0 (resp 1) if the block is left (resp centered)
+  !!    @param[in,out]    scal          = scalar field to advect
+  subroutine Zremesh_O4(ind_group, gs, p_pos_adim, bl_type, bl_tag,i,j,scal)
 
     ! Input/Output
     integer, dimension(2), intent(in)                                   :: ind_group
@@ -480,69 +480,69 @@ subroutine Zremesh_O4(ind_group, gs, p_pos_adim, bl_type, bl_tag,i,j,scal)
     logical, dimension(bl_nb(direction),gs(1),gs(2)), intent(in)        :: bl_tag       ! indice of tagged particles
     real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal
     ! Other local variables
-        ! Variables used to remesh particles ...
-        ! ... and to communicate between subdomains. A variable prefixed by "send_"(resp "rece")
-        ! designes something I send (resp. I receive).
+    ! Variables used to remesh particles ...
+    ! ... and to communicate between subdomains. A variable prefixed by "send_"(resp "rece")
+    ! designates something I send (resp. I receive).
     real(WP),dimension(:),allocatable   :: send_buffer  ! buffer use to remesh the scalar before to send it to the right subdomain
     integer, dimension(2,gs(1),gs(2))   :: rece_proc    ! minimal and maximal gap between my Y-coordinate and the one from which 
-                                                        ! I will receive data
+    ! I will receive data
     integer, dimension(gs(1),gs(2))     :: proc_min     ! smaller gap between me and the processes to where I send data
     integer, dimension(gs(1),gs(2))     :: proc_max     ! smaller gap between me and the processes to where I send data
     integer                             :: i1, i2       ! indice of a line into the group
 
     !  -- Compute ranges --
     where (bl_type(1,:,:))
-        ! First particle is a centered one
-        send_group_min = nint(p_pos_adim(1,:,:))-2
+       ! First particle is a centered one
+       send_group_min = nint(p_pos_adim(1,:,:))-2
     elsewhere
-        ! First particle is a left one
-        send_group_min = floor(p_pos_adim(1,:,:))-2
+       ! First particle is a left one
+       send_group_min = floor(p_pos_adim(1,:,:))-2
     end where
     where (bl_type(N_proc(direction)/bl_size +1,:,:))
-        ! Last particle is a centered one
-        send_group_max = nint(p_pos_adim(N_proc(direction),:,:))+2
+       ! Last particle is a centered one
+       send_group_max = nint(p_pos_adim(N_proc(direction),:,:))+2
     elsewhere
-        ! Last particle is a left one
-        send_group_max = floor(p_pos_adim(N_proc(direction),:,:))+2
+       ! Last particle is a left one
+       send_group_max = floor(p_pos_adim(N_proc(direction),:,:))+2
     end where
 
     ! -- Determine the communication needed : who will communicate whit who ? (ie compute sender and recer) --
     call AC_obtain_senders(direction, gs, ind_group, proc_min, proc_max, rece_proc)
 
     do i2 = 1, gs(2)
-        do i1 = 1, gs(1)
-            send_j_min = send_group_min(i1,i2)
-            send_j_max = send_group_max(i1,i2)
-
-            ! -- Allocate buffer for remeshing of local particles --
-            allocate(send_buffer(send_j_min:send_j_max))
-            send_buffer = 0.0;
-        
-            ! -- Remesh the particles in the buffer --
-            call AC_remesh_lambda4corrected(direction, p_pos_adim(:,i1,i2), scal(i+i1-1,j+i2-1,:), &
-                & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, send_j_max, send_buffer)
-            
-            ! -- Send the buffer to the matching processus and update the scalar field --
-            scal(i+i1-1,j+i2-1,:) = 0
-            call AC_bufferToScalar(direction, ind_group , send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
-                & rece_proc(:,i1,i2), send_buffer, scal(i+i1-1,j+i2-1,:))
-
-            ! Deallocate all field
-            deallocate(send_buffer)
-            
-        end do
+       do i1 = 1, gs(1)
+          send_j_min = send_group_min(i1,i2)
+          send_j_max = send_group_max(i1,i2)
+
+          ! -- Allocate buffer for remeshing of local particles --
+          allocate(send_buffer(send_j_min:send_j_max))
+          send_buffer = 0.0;
+
+          ! -- Remesh the particles in the buffer --
+          call AC_remesh_lambda4corrected(direction, p_pos_adim(:,i1,i2), scal(i+i1-1,j+i2-1,:), &
+               & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, send_j_max, send_buffer)
+
+          ! -- Send the buffer to the matching processus and update the scalar field --
+          scal(i+i1-1,j+i2-1,:) = 0
+          call AC_bufferToScalar(direction, ind_group , send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+               & rece_proc(:,i1,i2), send_buffer, scal(i+i1-1,j+i2-1,:))
+
+          ! Deallocate all field
+          deallocate(send_buffer)
+
+       end do
     end do
 
-end subroutine Zremesh_O4
+  end subroutine Zremesh_O4
 
 
-!> remeshing with M'6 formula - No tag neither correction for large time steps.
-!!    @param[in]        ind_group   = coordinate of the current group of lines
-!!    @param[in]        gs          = size of groups (along X direction)
-!!    @param[in]        p_pos_adim  = adimensionned particles position
-!!    @param[in]        i,j         = indice of of the current line (x-coordinate and y-coordinate)
-!!    @param[in,out]    scal        = scalar field to advect
-subroutine Zremesh_Mprime6(ind_group, gs, p_pos_adim, i,j,scal)
+  !> remeshing with M'6 formula - No tag neither correction for large time steps.
+  !!    @param[in]        ind_group   = coordinate of the current group of lines
+  !!    @param[in]        gs          = size of groups (along X direction)
+  !!    @param[in]        p_pos_adim  = adimensionned particles position
+  !!    @param[in]        i,j         = indices of the current line (x-coordinate and y-coordinate)
+  !!    @param[in,out]    scal        = scalar field to advect
+  subroutine Zremesh_Mprime6(ind_group, gs, p_pos_adim, i,j,scal)
 
     ! input/output
     integer, dimension(2), intent(in)                                   :: ind_group
@@ -551,12 +551,12 @@ subroutine Zremesh_Mprime6(ind_group, gs, p_pos_adim, i,j,scal)
     real(WP), dimension(N_proc(direction),gs(1),gs(2)), intent(in)      :: p_pos_adim   ! adimensionned particles position 
     real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal
     ! Other local variables 
-        ! Variables used to remesh particles ...
-        ! ... and to communicate between subdomains. A variable prefixed by "send_"(resp "rece")
-        ! designes something I send (resp. I receive).
+    ! Variables used to remesh particles ...
+    ! ... and to communicate between subdomains. A variable prefixed by "send_"(resp "rece")
+    ! designates something I send (resp. I receive).
     real(WP),dimension(:),allocatable   :: send_buffer  ! buffer use to remesh the scalar before to send it to the right subdomain
     integer, dimension(2,gs(1),gs(2))   :: rece_proc    ! minimal and maximal gap between my Y-coordinate and the one from which 
-                                                        ! I will receive data
+    ! I will receive data
     integer, dimension(gs(1),gs(2))     :: proc_min     ! smaller gap between me and the processes to where I send data
     integer, dimension(gs(1),gs(2))     :: proc_max     ! smaller gap between me and the processes to where I send data
     integer                             :: i1, i2       ! indice of a line into the group
@@ -570,44 +570,44 @@ subroutine Zremesh_Mprime6(ind_group, gs, p_pos_adim, i,j,scal)
     call AC_obtain_senders(direction, gs, ind_group, proc_min, proc_max, rece_proc, 1)
 
     do i2 = 1, gs(2)
-        do i1 = 1, gs(1)
-            send_j_min = send_group_min(i1,i2)
-            send_j_max = send_group_max(i1,i2)
+       do i1 = 1, gs(1)
+          send_j_min = send_group_min(i1,i2)
+          send_j_max = send_group_max(i1,i2)
 
-            !  -- Allocate and initialize the buffer --
-            allocate(send_buffer(send_j_min:send_j_max))
-            send_buffer = 0.0;
+          !  -- Allocate and initialize the buffer --
+          allocate(send_buffer(send_j_min:send_j_max))
+          send_buffer = 0.0;
 
-            ! -- Remesh the particles in the buffer --
-            do ind_p = 1, N_proc(direction), 1
-                call AC_remesh_Mprime6(p_pos_adim(ind_p,i1,i2),scal(i+i1-1,j+i2-1, ind_p), send_buffer)
-            end do
+          ! -- Remesh the particles in the buffer --
+          do ind_p = 1, N_proc(direction), 1
+             call AC_remesh_Mprime6(p_pos_adim(ind_p,i1,i2),scal(i+i1-1,j+i2-1, ind_p), send_buffer)
+          end do
 
-            ! -- Send the buffer to the matching processus and update the scalar field --
-            scal(i+i1-1,j+i2-1,:) = 0
-            call AC_bufferToScalar(direction, ind_group, send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
-                & rece_proc(:,i1,i2), send_buffer, scal(i+i1-1,j+i2-1,:))
+          ! -- Send the buffer to the matching processus and update the scalar field --
+          scal(i+i1-1,j+i2-1,:) = 0
+          call AC_bufferToScalar(direction, ind_group, send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+               & rece_proc(:,i1,i2), send_buffer, scal(i+i1-1,j+i2-1,:))
 
-            ! Deallocate all field
-            deallocate(send_buffer)
+          ! Deallocate all field
+          deallocate(send_buffer)
 
-        end do
+       end do
     end do
 
-end subroutine Zremesh_Mprime6
+  end subroutine Zremesh_Mprime6
 
 
-! ====================================================================
-! ====================    Initialize particle     ====================
-! ====================================================================
+  ! ====================================================================
+  ! ====================    Initialize particle     ====================
+  ! ====================================================================
 
-!> Creation and initialisation of a particle line (ie X and Y coordinate are fixed)
-!!    @param[in]    Vz          = 3D velocity field
-!!    @param[in]    i           = X-indice of the current line
-!!    @param[in]    j           = Y-indice of the current line
-!!    @param[out]   p_pos_adim  = adimensioned particles postion
-!!    @param[out]   p_V         = particle velocity 
-subroutine advecZ_init_line(Vz, i, j, p_pos_adim, p_V)
+  !> Creation and initialisation of a particle line (ie X and Y coordinate are fixed)
+  !!    @param[in]    Vz          = 3D velocity field
+  !!    @param[in]    i           = X-indice of the current line
+  !!    @param[in]    j           = Y-indice of the current line
+  !!    @param[out]   p_pos_adim  = adimensioned particles position
+  !!    @param[out]   p_V         = particle velocity 
+  subroutine advecZ_init_line(Vz, i, j, p_pos_adim, p_V)
 
     ! Input/Output
     integer, intent(in)                                 :: i,j
@@ -617,21 +617,21 @@ subroutine advecZ_init_line(Vz, i, j, p_pos_adim, p_V)
     integer                                             :: ind  ! indice
 
     do ind = 1, N_proc(direction)
-        p_pos_adim(ind) = ind
-        p_V(ind)        = Vz(i,j,ind)
+       p_pos_adim(ind) = ind
+       p_V(ind)        = Vz(i,j,ind)
     end do
 
-end subroutine advecZ_init_line
+  end subroutine advecZ_init_line
 
 
-!> Creation and initialisation of a group of particle line
-!!    @param[in]    Vz          = 3D velocity field
-!!    @param[in]    i           = X-indice of the current line
-!!    @param[in]    j           = Y-indice of the current line
-!!    @param[in]    Gsize       = size of groups (along Z direction)
-!!    @param[out]   p_pos_adim  = adimensioned particles postion
-!!    @param[out]   p_V         = particle velocity 
-subroutine advecZ_init_group(Vz, i, j, Gsize, p_pos_adim, p_V)
+  !> Creation and initialisation of a group of particle line
+  !!    @param[in]    Vz          = 3D velocity field
+  !!    @param[in]    i           = X-indice of the current line
+  !!    @param[in]    j           = Y-indice of the current line
+  !!    @param[in]    Gsize       = size of groups (along Z direction)
+  !!    @param[out]   p_pos_adim  = adimensioned particles postion
+  !!    @param[out]   p_V         = particle velocity 
+  subroutine advecZ_init_group(Vz, i, j, Gsize, p_pos_adim, p_V)
 
     ! Input/Output
     integer, intent(in)                                                     :: i,j
@@ -643,15 +643,15 @@ subroutine advecZ_init_group(Vz, i, j, Gsize, p_pos_adim, p_V)
     integer                                     :: i_gp, j_gp   ! X and Y indice of the current line in the group
 
     do j_gp = 1, Gsize(2)
-        do i_gp = 1, Gsize(1)
-            do ind = 1, N_proc(direction)
-                p_pos_adim(ind, i_gp, j_gp) = ind
-                p_V(ind, i_gp, j_gp)        = Vz(i+(i_gp-1),j+(j_gp-1), ind)
-            end do
-        end do
+       do i_gp = 1, Gsize(1)
+          do ind = 1, N_proc(direction)
+             p_pos_adim(ind, i_gp, j_gp) = ind
+             p_V(ind, i_gp, j_gp)        = Vz(i+(i_gp-1),j+(j_gp-1), ind)
+          end do
+       end do
     end do
 
-end subroutine advecZ_init_group
+  end subroutine advecZ_init_group
 
 end module advecZ
 !> @}
diff --git a/HySoP/src/Unstable/LEGI/src/particles/advec_common.f90 b/HySoP/src/Unstable/LEGI/src/particles/advec_common.f90
index e0a059784..48433ced4 100644
--- a/HySoP/src/Unstable/LEGI/src/particles/advec_common.f90
+++ b/HySoP/src/Unstable/LEGI/src/particles/advec_common.f90
@@ -32,7 +32,6 @@
 module advec_common
 
   use precision
-  use string
   use cart_topology
   use advec_variables!, only : real_pter
   use advec_common_line
@@ -62,8 +61,7 @@ module advec_common
   private                             :: AC_obtain_senders_group
   private                             :: AC_obtain_senders_com
   public                              :: AC_bufferToScalar
-  private                             :: AC_bufferToScalar_group
-
+  
   interface AC_particle_velocity
      module procedure AC_particle_velocity_line, AC_velocity_interpol_group
   end interface AC_particle_velocity
@@ -78,9 +76,9 @@ module advec_common
      module procedure AC_obtain_senders_line, AC_obtain_senders_com, &
           & AC_obtain_senders_group
   end interface AC_obtain_senders
-
+  
   interface AC_bufferToScalar
-     module procedure AC_bufferToScalar_line, AC_bufferToScalar_group
+     module procedure AC_bufferToScalar_line
   end interface AC_bufferToScalar
 
 
@@ -476,7 +474,7 @@ contains
           tag = compute_tag(ind_group, tag_bufToScal_range, direction, proc_gap)
           ! Send message
           call mpi_Isend(cartography(1,proc_gap), com_size, MPI_INTEGER, send_rank(proc_gap), tag, &
-               & D_comm(direction), s_request_bis(proc_gap),ierr)
+               D_comm(direction), s_request_bis(proc_gap),ierr)
        end if
     end do
 
@@ -866,8 +864,7 @@ contains
   !!    formula are required. In those points, it tagg block transition (ie the end of
   !!    the current block and the beginning of the following one) in order to indicate
   !!    that corrected weigth have to be used during the remeshing.
-  subroutine AC_type_and_block_group(dt, dir, gp_s, ind_group, p_V, &
-       & bl_type, bl_tag)
+  subroutine AC_type_and_block_group(dt, dir, gp_s, ind_group, p_V,bl_type, bl_tag)
 
     real(WP), intent(in)                                        :: dt           ! time step
     integer, intent(in)                                         :: dir
@@ -1377,7 +1374,7 @@ contains
   !!    correspond do scheme without bloc of particle involving velocity variation
   !!    contrainsts to avoid that the distance between to particle grows (or dimishes)
   !!    too much.
-  subroutine AC_obtain_senders_com(direction, gp_s, ind_group, proc_min, proc_max, rece_proc,com)
+  subroutine AC_obtain_senders_com(direction, gp_s, ind_group, proc_min, proc_max, rece_proc, com)
     ! XXX Work only for periodic condition. See AC_obtain_senders. Adapt it for
     ! other condition must be more easy.
 
@@ -1386,7 +1383,7 @@ contains
     integer, dimension(2), intent(in)               :: ind_group
     integer(kind=4), dimension(:,:), intent(out)    :: proc_min, proc_max
     integer, dimension(:,:,:), intent(out)          :: rece_proc
-    integer, dimension(2),intent(in)               :: gp_s
+    integer, dimension(2), intent(in)               :: gp_s
     integer, intent(in)                             :: com
     ! Other local variable
     integer(kind=4)                                 :: proc_gap         ! gap between a processus coordinate (along the current 
@@ -1533,147 +1530,6 @@ contains
 
   end subroutine AC_obtain_senders_com
 
-  !> Common procedure for remeshing wich perform all the communcation and provide
-  !! the update scalar field.
-  !!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
-  !!    @param[in]        gp_s        = size of group of line along the current direction
-  !!    @param[in]        ind_group   = coordinate of the current group of lines
-  !!    @param[in]        send_buffer = buffer use to remesh the scalar before to send it to the right subdomain
-  !!    @param[in,out]    scal1D      = mono-dimensionnal scalar field to advect
-  !!                                  (XXX todo : decide if extraction from the 3D or pointeur)
-  !! @details
-  !!    Remeshing are done in a local buffer. This subroutine distribute this buffer 
-  !!    to the right processes, receive the buffer send and update the scalar field.
-  subroutine AC_bufferToScalar_group(direction, gp_s, ind_group, send_buffer, scal1D)
-
-    ! Input/Ouptut
-    integer, intent(in)                                                         :: direction
-    integer, dimension(2), intent(in)                                           :: gp_s
-    integer, dimension(2), intent(in)                                           :: ind_group
-    real(WP), dimension(send_j_min:send_j_max), intent(in)                      :: send_buffer
-    !XXX - ca ne devrait pas bugger sans les range
-    real(WP), dimension(N_proc(direction)), intent(inout)                       :: scal1D
-
-    ! Variables used to communicate between subdomains. A variable prefixed by "send_"(resp "rece")
-    ! design something I send (resp. I receive).
-    integer                             :: i            ! table indice
-    integer                             :: proc_gap     ! gap between my Y-coordinate and the one of the processus 
-    integer, dimension(2)               :: rece_proc    ! minimal and maximal gap between my Y-coordinate and the one from which 
-    ! I will receive data
-    integer                             :: proc_min     ! smaller gap between me and the processes to where I send data
-    integer                             :: proc_max     ! smaller gap between me and the processes to where I send data
-    real(WP), dimension(:), allocatable :: rece_buffer  ! buffer use to stock received scalar field
-    integer                             :: send_gap     ! number of mesh between my and another processus
-    integer,dimension(:,:), allocatable :: rece_range   ! range of (local) indice where the received scalar field has to be save
-    integer, dimension(2)               :: send_range   ! range of (local) indice where the send scalar field has to be save
-    integer, dimension(:), allocatable  :: rece_request ! mpi communication request (handle) of nonblocking receive
-    integer, dimension(:), allocatable  :: rece_rank    ! rank of processus from wich I receive data
-    integer                             :: send_rank    ! rank of processus to which I send data
-    integer                             :: rankP        ! rank used in mpi_cart_shift
-    integer, dimension(MPI_STATUS_SIZE) :: rece_status  ! mpi status (for mpi_wait)
-    integer, dimension(MPI_STATUS_SIZE) :: send_status  ! mpi status (for mpi_wait)
-    integer, dimension(:,:),allocatable :: send_request ! mpi status of nonblocking send
-    integer                             :: rece_i_min   ! the minimal indice from where belong the scalar field I receive
-    integer                             :: rece_i_max   ! the maximal indice from where belong the scalar field I receive
-    integer                             :: ierr         ! mpi error code
-    integer                             :: comm_size    ! number of element to send/receive
-    integer                             :: tag          ! mpi message tag
-    ! with wich I communicate.
-
-
-    ! Determine the communication needed : who will communicate whit who ? (ie compute sender and recer)
-    call AC_obtain_senders(send_j_min, send_j_max, direction, ind_group, proc_min, proc_max, rece_proc)
-    ! Send the information
-    allocate(send_request(proc_min:proc_max,3))
-    send_request(:,3)=0
-    do proc_gap = proc_min, proc_max
-       ! Compute the rank of the target processus
-       call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank, ierr)
-       send_gap = proc_gap*N_proc(direction)
-       send_range(1) = max(send_j_min, send_gap+1) ! fortran => indice start from 0
-       send_range(2) = min(send_j_max, send_gap+N_proc(direction)) 
-       if (send_rank/=D_rank(direction)) then
-          ! Determine quantity of information to send
-          comm_size = send_range(2)-send_range(1)+1
-          ! Send the range of the scalar field send
-          tag = compute_tag(ind_group, tag_bufToScal_range, direction, proc_gap)
-          call mpi_Isend(send_range(1), 2, MPI_INTEGER, send_rank, tag, D_comm(direction), send_request(proc_gap,1)&
-               & , ierr)
-          ! And send the buffer
-          tag = compute_tag(ind_group, tag_bufToScal_buffer, direction, proc_gap)
-          call mpi_Isend(send_buffer(send_range(1)),comm_size, MPI_DOUBLE_PRECISION, send_rank, &
-               & tag, D_comm(direction), send_request(proc_gap,2), ierr)
-          send_request(proc_gap,3) = 1
-       else
-          ! I have to distribute the buffer in myself
-          do i = send_range(1), send_range(2)
-             scal1D(i-send_gap) = scal1D(i-send_gap) + send_buffer(i)
-          end do
-       end if
-    end do
-
-    ! Receive information
-    ! Allocate field
-    allocate(rece_rank(rece_proc(1):rece_proc(2)))
-    allocate(rece_range(2,rece_proc(1):rece_proc(2)))  ! be careful that mpi use contiguous memory element
-    allocate(rece_request(rece_proc(1):rece_proc(2)))
-    ! Receive range
-    do proc_gap = rece_proc(1), rece_proc(2)
-       call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rece_rank(proc_gap), ierr)
-       if (rece_rank(proc_gap)/=D_rank(direction)) then
-          tag = compute_tag(ind_group, tag_bufToScal_range, direction, -proc_gap)
-          call mpi_Irecv(rece_range(1,proc_gap), 2, MPI_INTEGER, rece_rank(proc_gap), tag, D_comm(direction), &
-               & rece_request(proc_gap), ierr) ! we use tag = source rank
-       end if
-    end do
-    ! Check reception
-    do proc_gap = rece_proc(1), rece_proc(2)
-       if (rece_rank(proc_gap)/=D_rank(direction)) then
-          call mpi_wait(rece_request(proc_gap), rece_status, ierr)
-       end if
-    end do
-    deallocate(rece_request)
-    ! Receive buffer and remesh it
-    ! XXX Possible optimisation : an optimal code will
-    !   1 - have non-blocking reception of scalar buffers
-    !   2 - check when a reception is done and then update the scalar
-    !   3 - iterate step 2 until all message was rece and that the scalar
-    !       field was update with all the scalar buffers
-    do proc_gap = rece_proc(1), rece_proc(2)
-       if (rece_rank(proc_gap)/=D_rank(direction)) then
-          rece_i_min = rece_range(1,proc_gap)
-          rece_i_max = rece_range(2,proc_gap)
-          ! Receive information
-          comm_size=(rece_i_max-rece_i_min+1)
-          allocate(rece_buffer(rece_i_min:rece_i_max)) ! XXX possible optimisation
-          ! by allocating one time to the max size, note that the range use in
-          ! this allocation instruction is include in (1, N_proc(2))
-          tag = compute_tag(ind_group, tag_bufToScal_buffer, direction, -proc_gap)
-          call mpi_recv(rece_buffer(rece_i_min), comm_size, MPI_DOUBLE_PRECISION, &
-               & rece_rank(proc_gap), tag, D_comm(direction), rece_status, ierr)
-          ! Update the scalar field
-          send_gap = proc_gap*N_proc(direction)
-          scal1D(rece_i_min+send_gap:rece_i_max+send_gap) = scal1D(rece_i_min+send_gap:rece_i_max+send_gap) &
-               & + rece_buffer(rece_i_min:rece_i_max)
-          deallocate(rece_buffer)
-       end if
-    end do
-
-
-    ! Free Isend buffer
-    do proc_gap = proc_min, proc_max
-       if (send_request(proc_gap,3)==1) then
-          call mpi_wait(send_request(proc_gap,1), send_status, ierr)
-          call mpi_wait(send_request(proc_gap,2), send_status, ierr)
-       end if
-    end do
-    deallocate(send_request)
-
-    deallocate(rece_range)
-    deallocate(rece_rank)
-
-  end subroutine AC_bufferToScalar_group
-
   ! ===================================================================
   ! ====================     Remesh particles      ====================
   ! ===================================================================
@@ -1698,12 +1554,12 @@ contains
   !! tagged too.
   !! The following algorithm is write for block of minimal size.
   !! @author = Jean-Baptiste Lagaert, LEGI/Ljk
-  subroutine AC_remesh_lambda2corrected_array(direction, p_pos_adim, scal1D, bl_type, bl_tag, ind_min, ind_max, send_buffer)
+  subroutine AC_remesh_lambda2corrected_array(direction,p_pos_adim,scal1D,bl_type,bl_tag,ind_min,ind_max,send_buffer)
 
     ! Input/Output
     integer, intent(in)                                 :: direction
     real(WP), dimension(:), intent(in)                  :: p_pos_adim
-    real(WP), dimension(N_proc(direction)), intent(in)  :: scal1D
+    real(WP), dimension(:), intent(in)  :: scal1D
     logical, dimension(:), intent(in)                   :: bl_type
     logical, dimension(:), intent(in)                   :: bl_tag
     integer, intent(in)                                 :: ind_min, ind_max
@@ -1729,10 +1585,10 @@ contains
           ! XXX Debug - end
           if (bl_type(bl_ind)) then
              ! tagged, the first particle belong to a centered block and the last to left block.
-         !!!!!     call AC_remesh_tag_CL(p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), send_buffer)
+             call AC_remesh_tag_CL(p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), send_buffer)
           else
              ! tagged, the first particle belong to a left block and the last to centered block.
-             !!!!! call AC_remesh_tag_LC(p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), send_buffer)
+             call AC_remesh_tag_LC(p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), send_buffer)
           end if
        else
           if (bl_type(bl_ind)) then
@@ -1775,12 +1631,12 @@ contains
   !! tagged too.
   !! The following algorithm is write for block of minimal size.
   !! @author = Jean-Baptiste Lagaert, LEGI/Ljk
-  subroutine AC_remesh_lambda2corrected_pter(direction, p_pos_adim, scal1D, bl_type, bl_tag, ind_min, ind_max, send_buffer)
+  subroutine AC_remesh_lambda2corrected_pter(direction,p_pos_adim,scal1D,bl_type,bl_tag,ind_min,ind_max,send_buffer)
 
     ! Input/Output
     integer, intent(in)                                         :: direction
     real(WP), dimension(:), intent(in)                          :: p_pos_adim
-    real(WP), dimension(N_proc(direction)), intent(in)          :: scal1D
+    real(WP), dimension(:), intent(in)          :: scal1D
     logical, dimension(:), intent(in)                           :: bl_type
     logical, dimension(:), intent(in)                           :: bl_tag
     integer, intent(in)                                         :: ind_min, ind_max
@@ -1806,10 +1662,10 @@ contains
           ! XXX Debug - end
           if (bl_type(bl_ind)) then
              ! tagged, the first particle belong to a centered block and the last to left block.
-          !!!!   call AC_remesh_tag_CL(p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), send_buffer)
+             !!call AC_remesh_tag_CL(p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), send_buffer)
           else
              ! tagged, the first particle belong to a left block and the last to centered block.
-          !!!!   call AC_remesh_tag_LC(p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), send_buffer)
+             !!call AC_remesh_tag_LC(p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), send_buffer)
           end if
        else
           if (bl_type(bl_ind)) then
@@ -1817,7 +1673,7 @@ contains
              !!call AC_remesh_center(p_pos_adim(p_ind),scal1D(p_ind), send_buffer)
           else
              ! First particle is remeshed with left formula
-            !! call AC_remesh_left(p_pos_adim(p_ind),scal1D(p_ind), send_buffer)
+             !!call AC_remesh_left(p_pos_adim(p_ind),scal1D(p_ind), send_buffer)
           end if
           if (bl_type(bl_ind+1)) then
              ! Second particle is remeshed with center formula
@@ -1856,7 +1712,7 @@ contains
     ! Input/Output
     integer, intent(in)                                 :: direction
     real(WP), dimension(:), intent(in)                  :: p_pos_adim
-    real(WP), dimension(N_proc(direction)), intent(in)  :: scal1D
+    real(WP), dimension(:), intent(in)  :: scal1D
     logical, dimension(:), intent(in)                   :: bl_type
     logical, dimension(:), intent(in)                   :: bl_tag
     integer, intent(in)                                 :: ind_min, ind_max
@@ -1902,7 +1758,6 @@ contains
 
   end subroutine AC_remesh_lambda4corrected
 
-
   !> Left remeshing formula of order 2
   !!      @param[in]       pos_adim= adimensionned particle position
   !!      @param[in]       sca     = scalar advected by the particle
diff --git a/HySoP/src/Unstable/LEGI/src/particles/advec_common_line.f90 b/HySoP/src/Unstable/LEGI/src/particles/advec_common_line.f90
index 0bc1e0a23..33bcafb2f 100644
--- a/HySoP/src/Unstable/LEGI/src/particles/advec_common_line.f90
+++ b/HySoP/src/Unstable/LEGI/src/particles/advec_common_line.f90
@@ -35,48 +35,47 @@
 !! Jean-Baptiste Lagaert, LEGI
 !
 !------------------------------------------------------------------------------
-      
-module advec_common_line
 
-    use precision
-    use string
-    use cart_topology
-    use advec_variables
-    use mpi
+module advec_common_line
+  
+  use mpi
+  use precision
+  use cart_topology
+  use advec_variables
 
-    implicit none
+  implicit none
 
-! ===== Public procedures =====
+  ! ===== Public procedures =====
 
-!----- To interpolate velocity -----
-!!!!! public                              :: AC_obtain_receivers_line
-!----- Determine block type and tag particles -----
-public                              :: AC_type_and_block_line
-!----- To remesh particles -----
-public                              :: AC_obtain_senders_line
-public                              :: AC_bufferToScalar_line
+  !----- To interpolate velocity -----
+  !! public                              :: AC_obtain_receivers_line
+  !----- Determine block type and tag particles -----
+  public                              :: AC_type_and_block_line
+  !----- To remesh particles -----
+  public                              :: AC_obtain_senders_line
+  public                              :: AC_bufferToScalar_line
 
 contains
 
-! ===== Public procedure =====
+  ! ===== Public procedure =====
 
 
-! ==================================================================================
-! ====================     Compute particle velocity (RK2)      ====================
-! ==================================================================================
+  ! ==================================================================================
+  ! ====================     Compute particle velocity (RK2)      ====================
+  ! ==================================================================================
 
-!> Determine the set of processes wich will send me information during the velocity interpolation.
-!!    @param[in]    direction       = current direction (1 = along X, 2 = along Y, 3 = along Z)
-!!    @param[in]    ind_group       = coordinate of the current group of lines
-!!    @param[in]    rece_ind_min    = minimal indice of mesh involved in remeshing particles (of the my local subdomains)
-!!    @param[in]    rece_ind_max    = maximal indice of mesh involved in remeshing particles (of the my local subdomains)
-!!    @param[out]   send_gap        = gap between my coordinate and the processes of minimal coordinate which will send information to me
-!!    @param[out]   rece_gap        = gap between my coordinate and the processes of maximal coordinate which will receive information from me
-!! @details
-!!    Obtain the list of processus wich need a part of my local velocity field
-!!    to interpolate the velocity used in the RK2 scheme to advect its particles.
-subroutine AC_obtain_receivers(direction, ind_group, rece_ind_min, rece_ind_max, send_gap, rece_gap)
-! XXX Work only for periodic condition.
+  !> Determine the set of processes which will send me information during the velocity interpolation.
+  !!    @param[in]    direction       = current direction (1 = along X, 2 = along Y, 3 = along Z)
+  !!    @param[in]    ind_group       = coordinate of the current group of lines
+  !!    @param[in]    rece_ind_min    = minimal indice of mesh involved in remeshing particles (of the my local subdomains)
+  !!    @param[in]    rece_ind_max    = maximal indice of mesh involved in remeshing particles (of the my local subdomains)
+  !!    @param[out]   send_gap        = gap between my coordinate and the processes of minimal coordinate which will send information to me
+  !!    @param[out]   rece_gap        = gap between my coordinate and the processes of maximal coordinate which will receive information from me
+  !! @details
+  !!    Obtain the list of processes which need a part of my local velocity field
+  !!    to interpolate the velocity used in the RK2 scheme to advect its particles.
+  subroutine AC_obtain_receivers(direction, ind_group, rece_ind_min, rece_ind_max, send_gap, rece_gap)
+    ! XXX Work only for periodic condition.
 
     ! Input/Ouput
     integer, intent(in)                 :: rece_ind_min, rece_ind_max
@@ -86,9 +85,9 @@ subroutine AC_obtain_receivers(direction, ind_group, rece_ind_min, rece_ind_max,
     integer, dimension(MPI_STATUS_SIZE) :: statut
     ! Others 
     integer                             :: proc_gap         ! gap between a processus coordinate (along the current 
-                                                            ! direction) into the mpi-topology and my coordinate
+    ! direction) into the mpi-topology and my coordinate
     integer                             :: rece_gapP        ! gap between the coordinate of the previous processus (in the current direction)
-                                                            ! and the processes of maximal coordinate which will receive information from it
+    ! and the processes of maximal coordinate which will receive information from it
     integer                             :: rece_gapN        ! same as above but for the next processus
     integer                             :: rankP, rankN     ! processus rank for shift (P= previous, N = next)
     integer                             :: tag_min, tag_max ! mpi message tag (for communicate rece_proc(1) and rece_proc(2))
@@ -106,7 +105,7 @@ subroutine AC_obtain_receivers(direction, ind_group, rece_ind_min, rece_ind_max,
 
     rece_gap(1) = floor(real(rece_ind_min-1)/N_proc(direction))
     rece_gap(2) = floor(real(rece_ind_max-1)/N_proc(direction))
-    
+
     ! ===== Communicate with my neigbors -> obtain ghost ! ====
     ! Compute their rank
     call mpi_cart_shift(D_comm(direction), 0, 1, rankP, rankN, ierr)
@@ -125,84 +124,85 @@ subroutine AC_obtain_receivers(direction, ind_group, rece_ind_min, rece_ind_max,
     test_request = .false.
     tag_table = compute_tag(ind_group, tag_obtrec_NP, direction)
     do proc_gap = rece_gap(1), rece_gap(2)
-        ! Compute the rank of the target processus
-        call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rankN, ierr)
-        ! Determine if I am the the first or the last processes (considering the current directory) 
-            ! to require information from this processus
-        if (proc_gap>rece_gapP-1) then
-            if(rankN /= D_rank(direction)) then
-                call mpi_Isend(-proc_gap, 1, MPI_INTEGER, rankN, tag_table(1), D_comm(direction), s_request(proc_gap,1), ierr)
-                test_request(proc_gap,1) = .true.
-            else
-                send_gap(1) = -proc_gap
-            end if
-        end if
-        if (proc_gap<rece_gapN+1) then
-            if(rankN /= D_rank(direction)) then
-                test_request(proc_gap,2) = .true.
-                call mpi_Isend(-proc_gap, 1, MPI_INTEGER, rankN, tag_table(2), D_comm(direction), s_request(proc_gap,2), ierr)
-            else
-                send_gap(2) = -proc_gap
-            end if
-        end if
+       ! Compute the rank of the target processus
+       call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rankN, ierr)
+       ! Determine if I am the first or the last process (considering the current direction)
+       ! to require information from this processus
+       if (proc_gap>rece_gapP-1) then
+          if(rankN /= D_rank(direction)) then
+             call mpi_Isend(-proc_gap, 1, MPI_INTEGER, rankN, tag_table(1), D_comm(direction), s_request(proc_gap,1), ierr)
+             test_request(proc_gap,1) = .true.
+          else
+             send_gap(1) = -proc_gap
+          end if
+       end if
+       if (proc_gap<rece_gapN+1) then
+          if(rankN /= D_rank(direction)) then
+             test_request(proc_gap,2) = .true.
+             call mpi_Isend(-proc_gap, 1, MPI_INTEGER, rankN, tag_table(2), D_comm(direction), s_request(proc_gap,2), ierr)
+          else
+             send_gap(2) = -proc_gap
+          end if
+       end if
     end do
 
 
     ! ===== Receive information form the first and the last processus which need a part of my local velocity field =====
     if (send_gap(1) == 3*N(direction)) then
-        call mpi_recv(send_gap(1), 1, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
+       call mpi_recv(send_gap(1), 1, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
     end if
     if (send_gap(2) == 3*N(direction)) then
-        call mpi_recv(send_gap(2), 1, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
+       call mpi_recv(send_gap(2), 1, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
     end if
 
 
     call MPI_WAIT(send_request,statut,ierr)
     call MPI_WAIT(send_request_bis,statut,ierr)
     do proc_gap = rece_gap(1), rece_gap(2)
-        if (test_request(proc_gap,1).eqv. .true.) call MPI_WAIT(s_request(proc_gap,1),statut,ierr)
-        if (test_request(proc_gap,2)) call MPI_WAIT(s_request(proc_gap,2),statut,ierr)
+       if (test_request(proc_gap,1).eqv. .true.) call MPI_WAIT(s_request(proc_gap,1),statut,ierr)
+       if (test_request(proc_gap,2)) call MPI_WAIT(s_request(proc_gap,2),statut,ierr)
     end do
     deallocate(s_request)
     deallocate(test_request)
 
-end subroutine AC_obtain_receivers
-
-
-! ===================================================================================================
-! ====================     Others than velocity interpolation and remeshing      ====================
-! ===================================================================================================
-!> Determine type (center or left) of each block of a line and tag particle of this line to know where
-!! corrected remeshing formula are recquired.
-!!    @param[in]        dt          = time step
-!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
-!!    @param[in]        ind_group   = coordinate of the current group of lines
-!!    @param[in]        p_V         = particle velocity (along the current direction)
-!!    @param[out]       bl_type     = table of blocks type (center of left)
-!!    @param[out]       bl_tag      = inform about tagged particles (bl_tag(ind_bl)=1 if the end of the bl_ind-th block 
-!!                                    and the begining of the following one is tagged)
-!! @details
-!!        This subroutine deals with a single line. For each line of this group, it
-!!    determine the type of each block of this line and where corrected remeshing
-!!    formula are required. In those points, it tagg block transition (ie the end of
-!!    the current block and the beginning of the following one) in order to indicate
-!!    that corrected weigth have to be used during the remeshing.
-subroutine AC_type_and_block_line(dt, direction, ind_group, p_V, &
-                    & bl_type, bl_tag)
+  end subroutine AC_obtain_receivers
+
+
+  ! ===================================================================================================
+  ! ====================     Others than velocity interpolation and remeshing      ====================
+  ! ===================================================================================================
+  !> Determine type (center or left) of each block of a line and tag particles of this line to know where
+  !! corrected remeshing formulae are required.
+  !!    @param[in]        dt          = time step
+  !!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+  !!    @param[in]        ind_group   = coordinate of the current group of lines
+  !!    @param[in]        p_V         = particle velocity (along the current direction)
+  !!    @param[out]       bl_type     = table of blocks type (center or left)
+  !!    @param[out]       bl_tag      = inform about tagged particles (bl_tag(ind_bl)=1 if the end of the bl_ind-th block
+  !!                                    and the beginning of the following one is tagged)
+  !! @details
+  !!        This subroutine deals with a single line. For each line of this group, it
+  !!    determines the type of each block of this line and where corrected remeshing
+  !!    formulae are required. At those points, it tags block transitions (i.e. the end of
+  !!    the current block and the beginning of the following one) in order to indicate
+  !!    that corrected weights have to be used during the remeshing.
+  subroutine AC_type_and_block_line(dt, direction, ind_group, p_V, &
+       & bl_type, bl_tag)
 
     ! In/Out variables
     real(WP), intent(in)                                    :: dt           ! time step
     integer, intent(in)                                     :: direction
     integer, dimension(2), intent(in)                       :: ind_group
     real(WP), dimension(:), intent(in)                      :: p_V
-    logical, dimension(bl_nb(direction)+1), intent(out)     :: bl_type      ! is the particle block a center block or a left one ?
-    logical, dimension(bl_nb(direction)), intent(out)       :: bl_tag       ! indice of tagged particles
+    !    logical, dimension(bl_nb(direction))+1), intent(out)     :: bl_type      ! is the particle block a center block or a left one ?
+    logical, dimension(:), intent(inout) :: bl_type      ! is the particle block a center block or a left one ?
+    logical, dimension(:), intent(inout)       :: bl_tag       ! indice of tagged particles
     ! Local variables
     real(WP), dimension(bl_nb(direction)+1)                 :: bl_lambdaMin ! for a particle, lamda = V*dt/dx ;  bl_lambdaMin = min of 
-                                                                            ! lambda on a block (take also into account first following particle)
+    ! lambda on a block (take also into account first following particle)
     real(WP)                                                :: lambP, lambN ! buffer to exchange some lambda min with other processus
     integer, dimension(bl_nb(direction)+1)                  :: bl_ind       ! block index : integer as lambda in (bl_ind,bl_ind+1) for a left block
-                                                                            ! and lambda in (bl_ind-1/2, bl_ind+1/2) for a right block 
+    ! and lambda in (bl_ind-1/2, bl_ind+1/2) for a right block 
     integer                                                 :: ind, i_p     ! some indices
     real(WP)                                                :: cfl          ! = d_sc
     integer                                                 :: rankP, rankN ! processus rank for shift (P= previous, N = next)
@@ -240,8 +240,8 @@ subroutine AC_type_and_block_line(dt, direction, ind_group, p_V, &
 
     ! -- For the "middle" block --
     do ind = 2, bl_nb(direction)
-        i_p = ((ind-1)*bl_size) + 1 - bl_size/2
-        bl_lambdaMin(ind) = minval(p_V(i_p:i_p+bl_size))*cfl
+       i_p = ((ind-1)*bl_size) + 1 - bl_size/2
+       bl_lambdaMin(ind) = minval(p_V(i_p:i_p+bl_size))*cfl
     end do
 
     ! -- For the first block (1/2) --
@@ -266,45 +266,45 @@ subroutine AC_type_and_block_line(dt, direction, ind_group, p_V, &
 
     ! ===== Tag particles =====
     do ind = 1, bl_nb(direction)
-        bl_tag(ind) = ((bl_ind(ind)/=bl_ind(ind+1)) .and. (bl_type(ind).neqv.bl_type(ind+1)))
+       bl_tag(ind) = ((bl_ind(ind)/=bl_ind(ind+1)) .and. (bl_type(ind).neqv.bl_type(ind+1)))
     end do
 
     call mpi_wait(send_request(1), send_status, ierr)
     call mpi_wait(send_request(2), send_status, ierr)
 
-end subroutine AC_type_and_block_line
-
-
-! ===================================================================
-! ====================     Remesh particles      ====================
-! ===================================================================
-
-!> Determine the set of processes wich will send me information during the
-!!  scalar remeshing. Use implicit computation rather than communication (only
-!!  possible if particle are gather by block whith contrainst on velocity variation
-!!  - as corrected lambda formula.) - work on only a line of particles.
-!!    @param[in]    send_i_min  = minimal indice of the send buffer
-!!    @param[in]    send_i_max  = maximal indice of the send buffer
-!!    @param[in]    direction   = current direction (1 = along X, 2 = along Y, 3 = along Z)
-!!    @param[in]    ind_group   = coordinate of the current group of lines
-!!    @param[out]   proc_min    = gap between my coordinate and the processes of minimal coordinate which will receive information from me
-!!    @param[out]   proc_max    = gap between my coordinate and the processes of maximal coordinate which will receive information from me
-!!    @param[out]   rece_proc   = coordinate range of processes which will send me information during the remeshing.
-!! @details
-!!    Obtain the list of processus which contains some particles which belong to
-!!    my subdomains after their advection (and thus which will be remeshing into
-!!    my subdomain). This result is return as an interval [send_min; send_max].
-!!    All the processus whose coordinate (into the current direction) belong to 
-!!    this segment are involved into scalar remeshing into the current
-!!    subdomains. This routine does not involve any computation to determine if
-!!    a processus is the first or the last processes (considering its coordinate along 
-!!    the current directory) to send remeshing information to a given processes.
-!!    It directly compute it using contraints on velocity (as in corrected lambda
-!!    scheme) When possible use it rather than AC_obtain_senders_com
-subroutine AC_obtain_senders_line(send_i_min, send_i_max, direction, ind_group, proc_min, proc_max, rece_proc)
-! XXX Work only for periodic condition. For dirichlet conditions : it is
-! possible to not receive either rece_proc(1), either rece_proc(2) or none of
-! these two => detect it (track the first and the last particles) and deal with it.
+  end subroutine AC_type_and_block_line
+
+
+  ! ===================================================================
+  ! ====================     Remesh particles      ====================
+  ! ===================================================================
+
+  !> Determine the set of processes which will send me information during the
+  !!  scalar remeshing. Use implicit computation rather than communication (only
+  !!  possible if particles are gathered by blocks with constraints on velocity variation
+  !!  - as corrected lambda formula.) - work on only a line of particles.
+  !!    @param[in]    send_i_min  = minimal index of the send buffer
+  !!    @param[in]    send_i_max  = maximal index of the send buffer
+  !!    @param[in]    direction   = current direction (1 = along X, 2 = along Y, 3 = along Z)
+  !!    @param[in]    ind_group   = coordinate of the current group of lines
+  !!    @param[out]   proc_min    = gap between my coordinate and the processes of minimal coordinate which will receive information from me
+  !!    @param[out]   proc_max    = gap between my coordinate and the processes of maximal coordinate which will receive information from me
+  !!    @param[out]   rece_proc   = coordinate range of processes which will send me information during the remeshing.
+  !! @details
+  !!    Obtain the list of processes which contain some particles that belong to
+  !!    my subdomains after their advection (and thus which will be remeshed into
+  !!    my subdomain). This result is returned as an interval [send_min; send_max].
+  !!    All the processes whose coordinate (along the current direction) belongs to
+  !!    this segment are involved in the scalar remeshing into the current
+  !!    subdomains. This routine does not involve any computation to determine if
+  !!    a process is the first or the last one (considering its coordinate along
+  !!    the current direction) to send remeshing information to a given process.
+  !!    It directly computes it using constraints on velocity (as in the corrected
+  !!    lambda scheme). When possible use it rather than AC_obtain_senders_com.
+  subroutine AC_obtain_senders_line(send_i_min, send_i_max, direction, ind_group, proc_min, proc_max, rece_proc)
+    ! XXX Work only for periodic condition. For dirichlet conditions : it is
+    ! possible to not receive either rece_proc(1), either rece_proc(2) or none of
+    ! these two => detect it (track the first and the last particles) and deal with it.
 
     ! Input/output
     integer, intent(in)                 :: send_i_min
@@ -316,7 +316,7 @@ subroutine AC_obtain_senders_line(send_i_min, send_i_max, direction, ind_group,
     integer, dimension(MPI_STATUS_SIZE) :: statut
     ! Other local variable
     integer(kind=4)                     :: proc_gap         ! gap between a processus coordinate (along the current 
-                                                            ! direction) into the mpi-topology and my coordinate
+    ! direction) into the mpi-topology and my coordinate
     integer                             :: rankP, rankN     ! processus rank for shift (P= previous, N = next)
     integer, dimension(2)               :: tag_table        ! mpi message tag (for communicate rece_proc(1) and rece_proc(2))
     integer, dimension(:,:),allocatable :: send_request     ! mpi status of nonblocking send
@@ -331,79 +331,79 @@ subroutine AC_obtain_senders_line(send_i_min, send_i_max, direction, ind_group,
 
     allocate(send_request(proc_min:proc_max,3))
     send_request(:,3) = 0
-    
+
     ! Send
     do proc_gap = proc_min, proc_max
-        ! Compute the rank of the target processus
-        call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rankN, ierr)
-        ! Determine if I am the the first or the last processes (considering my
-                ! coordinate along the current directory) to send information to
-                ! one of these processes.
-                ! Note that local indice go from 1 to N_proc (fortran).
-        ! I am the first ?
-        if ((send_i_min< +1-2*bl_bound_size + proc_gap*N_proc(direction)+1).AND. &
-                    & (send_i_max>= proc_gap*N_proc(direction))) then
-            if(rankN /= D_rank(direction)) then
-                call mpi_Isend(-proc_gap, 1, MPI_INTEGER, rankN, tag_table(1), D_comm(direction), &
-                        & send_request(proc_gap,1), ierr)
-                send_request(proc_gap,3) = 1
-            else
-                rece_proc(1) = -proc_gap
-            end if
-        end if
-        ! I am the last ?
-        if ((send_i_max > -1+2*bl_bound_size + (proc_gap+1)*N_proc(direction)) &
-                    & .AND.(send_i_min<= (proc_gap+1)*N_proc(direction))) then
-            if(rankN /= D_rank(direction)) then
-                call mpi_Isend(-proc_gap, 1, MPI_INTEGER, rankN, tag_table(2), D_comm(direction), &
-                        & send_request(proc_gap,2), ierr)
-                send_request(proc_gap,3) = send_request(proc_gap, 3) + 2
-            else
-                rece_proc(2) = -proc_gap
-            end if
-        end if
+       ! Compute the rank of the target processus
+       call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rankN, ierr)
+       ! Determine if I am the first or the last process (considering my
+       ! coordinate along the current direction) to send information to
+       ! one of these processes.
+       ! Note that local indices go from 1 to N_proc (fortran).
+       ! I am the first ?
+       if ((send_i_min< +1-2*bl_bound_size + proc_gap*N_proc(direction)+1).AND. &
+            & (send_i_max>= proc_gap*N_proc(direction))) then
+          if(rankN /= D_rank(direction)) then
+             call mpi_Isend(-proc_gap, 1, MPI_INTEGER, rankN, tag_table(1), D_comm(direction), &
+                  & send_request(proc_gap,1), ierr)
+             send_request(proc_gap,3) = 1
+          else
+             rece_proc(1) = -proc_gap
+          end if
+       end if
+       ! I am the last ?
+       if ((send_i_max > -1+2*bl_bound_size + (proc_gap+1)*N_proc(direction)) &
+            & .AND.(send_i_min<= (proc_gap+1)*N_proc(direction))) then
+          if(rankN /= D_rank(direction)) then
+             call mpi_Isend(-proc_gap, 1, MPI_INTEGER, rankN, tag_table(2), D_comm(direction), &
+                  & send_request(proc_gap,2), ierr)
+             send_request(proc_gap,3) = send_request(proc_gap, 3) + 2
+          else
+             rece_proc(2) = -proc_gap
+          end if
+       end if
     end do
 
 
     ! Receive
     if (rece_proc(1) == 3*N(direction)) then
-        call mpi_recv(rece_proc(1), 1, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
+       call mpi_recv(rece_proc(1), 1, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
     end if
     if (rece_proc(2) == 3*N(direction)) then
-        call mpi_recv(rece_proc(2), 1, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
+       call mpi_recv(rece_proc(2), 1, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
     end if
 
     ! Free Isend buffer
     do proc_gap = proc_min, proc_max
-        select case (send_request(proc_gap,3))
-            case (3)
-                call mpi_wait(send_request(proc_gap,1), statut, ierr)
-                call mpi_wait(send_request(proc_gap,2), statut, ierr)
-            case (2)
-                call mpi_wait(send_request(proc_gap,2), statut, ierr)
-            case (1)
-                call mpi_wait(send_request(proc_gap,1), statut, ierr)
-        end select
+       select case (send_request(proc_gap,3))
+       case (3)
+          call mpi_wait(send_request(proc_gap,1), statut, ierr)
+          call mpi_wait(send_request(proc_gap,2), statut, ierr)
+       case (2)
+          call mpi_wait(send_request(proc_gap,2), statut, ierr)
+       case (1)
+          call mpi_wait(send_request(proc_gap,1), statut, ierr)
+       end select
     end do
 
-end subroutine AC_obtain_senders_line
-
-
-!> Common procedure for remeshing wich perform all the communcation and provide
-!! the update scalar field.
-!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
-!!    @param[in]        ind_group   = coordinate of the current group of lines
-!!    @param[in]        send_i_min  = minimal indice of the send buffer
-!!    @param[in]        send_i_max  = maximal indice of the send buffer
-!!    @param[out]       proc_min    = gap between my coordinate and the processes of minimal coordinate which will receive information from me
-!!    @param[out]       proc_max    = gap between my coordinate and the processes of maximal coordinate which will receive information from me
-!!    @param[out]       rece_proc   = coordinate range of processes which will send me information during the remeshing.
-!!    @param[in]        send_buffer = buffer use to remesh the scalar before to send it to the right subdomain
-!!    @param[in,out]    scal1D      = mono-dimensionnal scalar field to advect
-!! @details
-!!    Remeshing are done in a local buffer. This subroutine distribute this buffer 
-!!    to the right processes, receive the buffer send and update the scalar field.
-subroutine AC_bufferToScalar_line(direction, ind_group, send_i_min, send_i_max, proc_min, proc_max, rece_proc,send_buffer, scal1D)
+  end subroutine AC_obtain_senders_line
+
+
+  !> Common procedure for remeshing which performs all the communication and provides
+  !! the updated scalar field.
+  !!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+  !!    @param[in]        ind_group   = coordinate of the current group of lines
+  !!    @param[in]        send_i_min  = minimal indice of the send buffer
+  !!    @param[in]        send_i_max  = maximal indice of the send buffer
+  !!    @param[out]       proc_min    = gap between my coordinate and the processes of minimal coordinate which will receive information from me
+  !!    @param[out]       proc_max    = gap between my coordinate and the processes of maximal coordinate which will receive information from me
+  !!    @param[out]       rece_proc   = coordinate range of processes which will send me information during the remeshing.
+  !!    @param[in]        send_buffer = buffer use to remesh the scalar before to send it to the right subdomain
+  !!    @param[in,out]    scal1D      = mono-dimensionnal scalar field to advect
+  !! @details
+  !!    Remeshing is done in a local buffer. This subroutine distributes this buffer
+  !!    to the right processes, receives the buffers sent and updates the scalar field.
+  subroutine AC_bufferToScalar_line(direction, ind_group, send_i_min, send_i_max, proc_min, proc_max, rece_proc,send_buffer, scal1D)
 
     ! Input/Ouptut
     integer, intent(in)                                     :: direction
@@ -411,11 +411,12 @@ subroutine AC_bufferToScalar_line(direction, ind_group, send_i_min, send_i_max,
     integer, intent(in)                                     :: send_i_min
     integer, intent(in)                                     :: send_i_max
     integer, dimension(2), intent(in)                       :: rece_proc    ! minimal and maximal gap between my Y-coordinate and the 
-                                                                            ! one from which  I will receive data
+    ! one from which  I will receive data
     integer, intent(in)                                     :: proc_min     ! smaller gap between me and the processes to where I send data
     integer, intent(in)                                     :: proc_max     ! smaller gap between me and the processes to where I send data
     real(WP), dimension(send_i_min:send_i_max), intent(in)  :: send_buffer
-    real(WP), dimension(N_proc(direction)), intent(inout)   :: scal1D
+ !   real(WP), dimension(N_proc(direction)), intent(inout)   :: scal1D
+    real(WP), dimension(:), intent(inout)   :: scal1D
 
     ! Variables used to communicate between subdomains. A variable prefixed by "send_"(resp "rece")
     ! design something I send (resp. I receive).
@@ -437,35 +438,35 @@ subroutine AC_bufferToScalar_line(direction, ind_group, send_i_min, send_i_max,
     integer                             :: ierr         ! mpi error code
     integer                             :: comm_size    ! number of element to send/receive
     integer                             :: tag          ! mpi message tag
-                                                        ! with wich I communicate.
+    ! with which I communicate.
 
     ! Send the information
     allocate(send_request(proc_min:proc_max,3))
     send_request(:,3)=0
     do proc_gap = proc_min, proc_max
-            ! Compute the rank of the target processus
-            call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank, ierr)
-            send_gap = proc_gap*N_proc(direction)
-            send_range(1) = max(send_i_min, send_gap+1) ! fortran => indice start from 0
-            send_range(2) = min(send_i_max, send_gap+N_proc(direction)) 
-        if (send_rank/=D_rank(direction)) then
-            ! Determine quantity of information to send
-            comm_size = send_range(2)-send_range(1)+1
-            ! Send the range of the scalar field send
-            tag = compute_tag(ind_group, tag_bufToScal_range, direction, proc_gap)
-            call mpi_Isend(send_range(1), 2, MPI_INTEGER, send_rank, tag, D_comm(direction), send_request(proc_gap,1)&
-                    & , ierr)
-            ! And send the buffer
-            tag = compute_tag(ind_group, tag_bufToScal_buffer, direction, proc_gap)
-            call mpi_Isend(send_buffer(send_range(1)),comm_size, MPI_DOUBLE_PRECISION, send_rank, &
-                        & tag, D_comm(direction), send_request(proc_gap,2), ierr)
-            send_request(proc_gap,3) = 1
-        else
-            ! I have to distribute the buffer in myself
-            do i = send_range(1), send_range(2)
-                scal1D(i-send_gap) = scal1D(i-send_gap) + send_buffer(i)
-            end do
-        end if
+       ! Compute the rank of the target processus
+       call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank, ierr)
+       send_gap = proc_gap*N_proc(direction)
+       send_range(1) = max(send_i_min, send_gap+1) ! fortran => indice start from 0
+       send_range(2) = min(send_i_max, send_gap+N_proc(direction)) 
+       if (send_rank/=D_rank(direction)) then
+          ! Determine quantity of information to send
+          comm_size = send_range(2)-send_range(1)+1
+          ! Send the range of the scalar field send
+          tag = compute_tag(ind_group, tag_bufToScal_range, direction, proc_gap)
+          call mpi_Isend(send_range(1), 2, MPI_INTEGER, send_rank, tag, D_comm(direction), send_request(proc_gap,1)&
+               & , ierr)
+          ! And send the buffer
+          tag = compute_tag(ind_group, tag_bufToScal_buffer, direction, proc_gap)
+          call mpi_Isend(send_buffer(send_range(1)),comm_size, MPI_DOUBLE_PRECISION, send_rank, &
+               & tag, D_comm(direction), send_request(proc_gap,2), ierr)
+          send_request(proc_gap,3) = 1
+       else
+          ! I have to distribute the buffer in myself
+          do i = send_range(1), send_range(2)
+             scal1D(i-send_gap) = scal1D(i-send_gap) + send_buffer(i)
+          end do
+       end if
     end do
 
     ! Receive information
@@ -475,61 +476,59 @@ subroutine AC_bufferToScalar_line(direction, ind_group, send_i_min, send_i_max,
     allocate(rece_request(rece_proc(1):rece_proc(2)))
     ! Receive range
     do proc_gap = rece_proc(1), rece_proc(2)
-        call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rece_rank(proc_gap), ierr)
-        if (rece_rank(proc_gap)/=D_rank(direction)) then
-            tag = compute_tag(ind_group, tag_bufToScal_range, direction, -proc_gap)
-            call mpi_Irecv(rece_range(1,proc_gap), 2, MPI_INTEGER, rece_rank(proc_gap), tag, D_comm(direction), &
-                        & rece_request(proc_gap), ierr) ! we use tag = source rank
-        end if
+       call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rece_rank(proc_gap), ierr)
+       if (rece_rank(proc_gap)/=D_rank(direction)) then
+          tag = compute_tag(ind_group, tag_bufToScal_range, direction, -proc_gap)
+          call mpi_Irecv(rece_range(1,proc_gap), 2, MPI_INTEGER, rece_rank(proc_gap), tag, D_comm(direction), &
+               & rece_request(proc_gap), ierr) ! we use tag = source rank
+       end if
     end do
     ! Check reception
     do proc_gap = rece_proc(1), rece_proc(2)
-        if (rece_rank(proc_gap)/=D_rank(direction)) then
-            call mpi_wait(rece_request(proc_gap), rece_status, ierr)
-        end if
+       if (rece_rank(proc_gap)/=D_rank(direction)) then
+          call mpi_wait(rece_request(proc_gap), rece_status, ierr)
+       end if
     end do
     deallocate(rece_request)
     ! Receive buffer and remesh it
-        ! XXX Possible optimisation : an optimal code will
-        !   1 - have non-blocking reception of scalar buffers
-        !   2 - check when a reception is done and then update the scalar
-        !   3 - iterate step 2 until all message was rece and that the scalar
-        !       field was update with all the scalar buffers
+    ! XXX Possible optimisation : an optimal code will
+    !   1 - have non-blocking reception of scalar buffers
+    !   2 - check when a reception is done and then update the scalar
+    !   3 - iterate step 2 until all messages are received and the scalar
+    !       field is updated with all the scalar buffers
     do proc_gap = rece_proc(1), rece_proc(2)
-        if (rece_rank(proc_gap)/=D_rank(direction)) then
-            rece_i_min = rece_range(1,proc_gap)
-            rece_i_max = rece_range(2,proc_gap)
-            ! Receive information
-            comm_size=(rece_i_max-rece_i_min+1)
-            allocate(rece_buffer(rece_i_min:rece_i_max)) ! XXX possible optimisation
-                ! by allocating one time to the max size, note that the range use in
-                ! this allocation instruction is include in (1, N_proc(2))
-            tag = compute_tag(ind_group, tag_bufToScal_buffer, direction, -proc_gap)
-            call mpi_recv(rece_buffer(rece_i_min), comm_size, MPI_DOUBLE_PRECISION, &
-                    & rece_rank(proc_gap), tag, D_comm(direction), rece_status, ierr)
-            ! Update the scalar field
-            send_gap = proc_gap*N_proc(direction)
-            scal1D(rece_i_min+send_gap:rece_i_max+send_gap) = scal1D(rece_i_min+send_gap:rece_i_max+send_gap) &
-                & + rece_buffer(rece_i_min:rece_i_max)
-            deallocate(rece_buffer)
-        end if
+       if (rece_rank(proc_gap)/=D_rank(direction)) then
+          rece_i_min = rece_range(1,proc_gap)
+          rece_i_max = rece_range(2,proc_gap)
+          ! Receive information
+          comm_size=(rece_i_max-rece_i_min+1)
+          allocate(rece_buffer(rece_i_min:rece_i_max)) ! XXX possible optimisation
+          ! by allocating one time to the max size; note that the range used in
+          ! this allocation instruction is included in (1, N_proc(2))
+          tag = compute_tag(ind_group, tag_bufToScal_buffer, direction, -proc_gap)
+          call mpi_recv(rece_buffer(rece_i_min), comm_size, MPI_DOUBLE_PRECISION, &
+               & rece_rank(proc_gap), tag, D_comm(direction), rece_status, ierr)
+          ! Update the scalar field
+          send_gap = proc_gap*N_proc(direction)
+          scal1D(rece_i_min+send_gap:rece_i_max+send_gap) = scal1D(rece_i_min+send_gap:rece_i_max+send_gap) &
+               & + rece_buffer(rece_i_min:rece_i_max)
+          deallocate(rece_buffer)
+       end if
     end do
-          
+
 
     ! Free Isend buffer
     do proc_gap = proc_min, proc_max
-        if (send_request(proc_gap,3)==1) then
-            call mpi_wait(send_request(proc_gap,1), send_status, ierr)
-            call mpi_wait(send_request(proc_gap,2), send_status, ierr)
-        end if
+       if (send_request(proc_gap,3)==1) then
+          call mpi_wait(send_request(proc_gap,1), send_status, ierr)
+          call mpi_wait(send_request(proc_gap,2), send_status, ierr)
+       end if
     end do
     deallocate(send_request)
 
     deallocate(rece_range)
     deallocate(rece_rank)
 
-end subroutine AC_bufferToScalar_line
-
-
+  end subroutine AC_bufferToScalar_line
 
 end module advec_common_line
diff --git a/HySoP/src/Unstable/LEGI/src/particles/advec_variables.f90 b/HySoP/src/Unstable/LEGI/src/particles/advec_variables.f90
index 282bf6195..471315c19 100644
--- a/HySoP/src/Unstable/LEGI/src/particles/advec_variables.f90
+++ b/HySoP/src/Unstable/LEGI/src/particles/advec_variables.f90
@@ -26,78 +26,78 @@
 
 module advec_variables
 
-    use string
-    use precision
-    use cart_topology, only : N_proc,cart_rank
-
-    implicit none
-
-    ! --- In order to create an array of pointer ---
-    type real_pter
-        real(WP), pointer                           :: pter
-    end type real_pter
-    ! ---------------------------------------------
-
-    ! ===== Public and protected variables =====
-    ! ----- Minimal and maximal indice of the buffer used in the different communication -----
-    !> minimal indice of the send buffer
-    integer, public                             :: send_j_min
-    !> maximal indice of the send buffer
-    integer, public                             :: send_j_max
-    !> minimal indice used in remeshing of each line
-    integer,public,dimension(:,:),allocatable   :: send_group_min
-    !> maximal indice used in remeshing of each line
-    integer,public,dimension(:,:),allocatable   :: send_group_max
-    ! ------ Block infromation -----
-    ! solver choosen
-    character(len=str_short), protected         :: type_solv
-    !> number of particles in a block
-    integer, protected                          :: bl_size           
-    !> distance between the "central" mesh point and the extream mesh point of the stencil of points used to remesh a particle
-    integer, protected                          :: bl_bound_size     
-    !> Number of common meshes used in the remeshing of two successive particle
-    !! (in case off standart (ie non corrected) remeshing formula)).
-    integer, dimension(2), protected            :: bl_remesh_superposition
-    !> Number of block on each processus along each direction
-    integer, dimension(3), protected            :: bl_nb
-
-    ! ------ To ensure unique mpi message tag -----
-    ! Tag generate with a proc_gap
-    !> To create tag used in AC_particle_velocity to send range
-    integer, dimension(2), parameter            :: tag_velo_range = (/ 0,1 /)
-    !> To create tag used in AC_particle_velocity to send velocity field
-    integer, dimension(2), parameter            :: tag_velo_V = (/ 0,2 /)
-    !> To create tag used in bufferToScalar to send range of buffer which will be send
-    integer, dimension(2), parameter            :: tag_bufToScal_range = (/ 0,3 /)
-    !> To create tag used in bufferToScalar to send the buffer used to remesh particles
-    integer, dimension(2), parameter            :: tag_bufToScal_buffer = (/ 0,4 /)
-
-    ! Tag generate with "compute_gap_NP"
-    !> To create tag used in AC_obtain_recevers to send ghost
-    integer, dimension(2), parameter            :: tag_obtrec_ghost_NP = (/ 0, 1/)
-    !> To create tag used in AC_type_and_bloc to exchange ghost with neighbors
-    integer, dimension(2), parameter            :: tag_part_tag_NP = (/ 0, 2/)
-    !> To create tag used in AC_obtain_recevers to send message about recevers of minimal and maximal rank
-    integer, dimension(2), parameter            :: tag_obtrec_NP = (/ 0, 3/)
-    !> To create tag used in AC_obtain_receivers to send message about senders of minimal and maximal rank
-    integer, dimension(2), parameter            :: tag_obtsend_NP = (/ 0, 4/)
-
-    ! ===== Public procedures =====
-    !----- Initialize solver -----
-    public                                      :: AC_solver_init
-    public                                      :: AC_set_part_bound_size
+  use string
+  use precision
+  use cart_topology, only : N_proc,cart_rank
+
+  implicit none
+
+  ! --- In order to create an array of pointer ---
+  type real_pter
+     real(WP), pointer                           :: pter
+  end type real_pter
+  ! ---------------------------------------------
+
+  ! ===== Public and protected variables =====
+  ! ----- Minimal and maximal indice of the buffer used in the different communication -----
+  !> minimal indice of the send buffer
+  integer, public                             :: send_j_min
+  !> maximal indice of the send buffer
+  integer, public                             :: send_j_max
+  !> minimal indice used in remeshing of each line
+  integer,public,dimension(:,:),allocatable   :: send_group_min
+  !> maximal indice used in remeshing of each line
+  integer,public,dimension(:,:),allocatable   :: send_group_max
+  ! ------ Block information -----
+  ! solver chosen
+  character(len=str_short), protected         :: type_solv
+  !> number of particles in a block
+  integer, protected                          :: bl_size           
+  !> distance between the "central" mesh point and the extreme mesh point of the stencil of points used to remesh a particle
+  integer, protected                          :: bl_bound_size     
+  !> Number of common meshes used in the remeshing of two successive particles
+  !! (in the case of the standard (i.e. non-corrected) remeshing formula).
+  integer, dimension(2), protected            :: bl_remesh_superposition
+  !> Number of block on each processus along each direction
+  integer, dimension(3), protected            :: bl_nb
+
+  ! ------ To ensure unique mpi message tag -----
+  ! Tag generate with a proc_gap
+  !> To create tag used in AC_particle_velocity to send range
+  integer, dimension(2), parameter            :: tag_velo_range = (/ 0,1 /)
+  !> To create tag used in AC_particle_velocity to send velocity field
+  integer, dimension(2), parameter            :: tag_velo_V = (/ 0,2 /)
+  !> To create tag used in bufferToScalar to send range of buffer which will be send
+  integer, dimension(2), parameter            :: tag_bufToScal_range = (/ 0,3 /)
+  !> To create tag used in bufferToScalar to send the buffer used to remesh particles
+  integer, dimension(2), parameter            :: tag_bufToScal_buffer = (/ 0,4 /)
+
+  ! Tag generate with "compute_gap_NP"
+  !> To create tag used in AC_obtain_recevers to send ghost
+  integer, dimension(2), parameter            :: tag_obtrec_ghost_NP = (/ 0, 1/)
+  !> To create tag used in AC_type_and_bloc to exchange ghost with neighbors
+  integer, dimension(2), parameter            :: tag_part_tag_NP = (/ 0, 2/)
+  !> To create tag used in AC_obtain_recevers to send message about receivers of minimal and maximal rank
+  integer, dimension(2), parameter            :: tag_obtrec_NP = (/ 0, 3/)
+  !> To create tag used in AC_obtain_receivers to send message about senders of minimal and maximal rank
+  integer, dimension(2), parameter            :: tag_obtsend_NP = (/ 0, 4/)
+
+  ! ===== Public procedures =====
+  !----- Initialize solver -----
+  public                                      :: AC_solver_init
+  public                                      :: AC_set_part_bound_size
 
 contains
 
-! ====================================================================
-! ====================    Initialize context      ====================
-! ====================================================================
+  ! ====================================================================
+  ! ====================    Initialize context      ====================
+  ! ====================================================================
 
-!> Initialize some variable related to the solver implementation (and which
-!! depend of the resmeshing formula choosen and the dimmensionnal splitting used).
-!!    @param[in]        part_solv   = remeshing formula choosen (spcae order, ...)
-!!    @param[in]        verbosity   = to display info about chosen remeshing formula (optional)
-subroutine AC_solver_init(part_solv, verbosity)
+  !> Initialize some variables related to the solver implementation (which
+  !! depend on the remeshing formula chosen and the dimensional splitting used).
+  !!    @param[in]        part_solv   = remeshing formula chosen (space order, ...)
+  !!    @param[in]        verbosity   = to display info about chosen remeshing formula (optional)
+  subroutine AC_solver_init(part_solv, verbosity)
 
     ! Input/Output
     character(len=*), optional, intent(in)  ::  part_solv
@@ -113,63 +113,63 @@ subroutine AC_solver_init(part_solv, verbosity)
 
     ! Initialisation part adapted to each method
     select case(type_solv)
-        case('p_O2')
-            bl_size = 2
-            bl_bound_size = 1
-            if ((cart_rank==0).and.(verbose)) then
-                write(*,'(6x,a)') '========== Advection scheme ========='
-                write(*,'(6x,a)') ' particle method, corrected lambda 2 '
-                write(*,'(6x,a)') '====================================='
-            end if
-        case('p_O4')
-            bl_size = 4
-            bl_bound_size = 2
-            if ((cart_rank==0).and.(verbose)) then
-                write(*,'(6x,a)') '========== Advection scheme ========='
-                write(*,'(6x,a)') ' particle method, corrected lambda 4 '
-                write(*,'(6x,a)') '====================================='
-            end if
-        case('p_M6')
-            bl_size = 1
-            bl_bound_size = 3   ! Be aware : don't use it to compute superposition between
-                                ! mpi processes (not as predictible as corrected scheme)
-            if ((cart_rank==0).and.(verbose)) then
-                write(*,'(6x,a)') '========== Advection scheme ========='
-                write(*,'(6x,a)') ' particle method, corrected M prime 6'
-                write(*,'(6x,a)') '====================================='
-            end if
-        case default
-            bl_size = 2
-            bl_bound_size = 1
-            if ((cart_rank==0).and.(verbose)) then
-                write(*,'(6x,a)') '========== Advection scheme ========='
-                write(*,'(6x,a)') ' particle method, corrected lambda 2 '
-                write(*,'(6x,a)') '====================================='
-            end if
+    case('p_O2')
+       bl_size = 2
+       bl_bound_size = 1
+       if ((cart_rank==0).and.(verbose)) then
+          write(*,'(6x,a)') '========== Advection scheme ========='
+          write(*,'(6x,a)') ' particle method, corrected lambda 2 '
+          write(*,'(6x,a)') '====================================='
+       end if
+    case('p_O4')
+       bl_size = 4
+       bl_bound_size = 2
+       if ((cart_rank==0).and.(verbose)) then
+          write(*,'(6x,a)') '========== Advection scheme ========='
+          write(*,'(6x,a)') ' particle method, corrected lambda 4 '
+          write(*,'(6x,a)') '====================================='
+       end if
+    case('p_M6')
+       bl_size = 1
+       bl_bound_size = 3   ! Be aware : don't use it to compute superposition between
+       ! mpi processes (not as predictible as corrected scheme)
+       if ((cart_rank==0).and.(verbose)) then
+          write(*,'(6x,a)') '========== Advection scheme ========='
+          write(*,'(6x,a)') ' particle method, corrected M prime 6'
+          write(*,'(6x,a)') '====================================='
+       end if
+    case default
+       bl_size = 2
+       bl_bound_size = 1
+       if ((cart_rank==0).and.(verbose)) then
+          write(*,'(6x,a)') '========== Advection scheme ========='
+          write(*,'(6x,a)') ' particle method, corrected lambda 2 '
+          write(*,'(6x,a)') '====================================='
+       end if
     end select
 
     ! Check if the subdomain contain a number of mesh wich could be divided by bl_size
     if ((modulo(N_proc(1),bl_size)/=0).OR.(modulo(N_proc(2),bl_size)/=0).OR.(modulo(N_proc(3),bl_size)/=0)) then
-        if (cart_rank ==0) print*, 'Number of mesh by processus must be a muliple of ', bl_size
-        stop
+       if (cart_rank ==0) print*, 'Number of mesh by processus must be a muliple of ', bl_size
+       stop
     end if
 
     ! Compute local number of block along each direction
     bl_nb = N_proc/bl_size
 
-end subroutine AC_solver_init
+  end subroutine AC_solver_init
 
-!> Manually change protected variable "bl_bound_size" - purpose test only (for
-!! auto-validation tests)
-!!    @param[in]        bound_size   = wanted value of "bl_bound_part"
-subroutine AC_set_part_bound_size(bound_size)
+  !> Manually change protected variable "bl_bound_size" - purpose test only (for
+  !! auto-validation tests)
+  !!    @param[in]        bound_size   = wanted value of "bl_bound_part"
+  subroutine AC_set_part_bound_size(bound_size)
 
     ! Input/Ouput
     integer, intent(in) :: bound_size
 
     bl_bound_size = bound_size
 
-end subroutine AC_set_part_bound_size
+  end subroutine AC_set_part_bound_size
 
 
 end module advec_variables
diff --git a/HySoP/src/Unstable/LEGI/test/CMakeLists.txt b/HySoP/src/Unstable/LEGI/test/CMakeLists.txt
deleted file mode 100644
index febd4f0ab..000000000
--- a/HySoP/src/Unstable/LEGI/test/CMakeLists.txt
+++ /dev/null
@@ -1 +0,0 @@
-add_subdirectory(src)
-- 
GitLab