diff --git a/HySoP/CMake/CMakeListsForTests.cmake b/HySoP/CMake/CMakeListsForTests.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..a149f3ae3ca7d983e9fa5c8c356a843bde4faa5b
--- /dev/null
+++ b/HySoP/CMake/CMakeListsForTests.cmake
@@ -0,0 +1,33 @@
+# -*- cmake -*-
+# This is the test cmake configuration,
+# built from @CMAKE_SOURCE_DIR@/cmake/CMakeListsForTests.cmake.in
+
+# scan the list of test executables
+foreach(_EXE ${_EXE_LIST_${_CURRENT_TEST_DIRECTORY}})
+ 
+  message(STATUS "Adding test suite ${_CURRENT_TEST_DIRECTORY}/${_EXE}")
+  
+  # Create an executable for the current test 
+  add_executable(${_EXE} ${${_EXE}_FSOURCES})
+
+  # Add a dependency between current test and the main library target
+  add_dependencies(${_EXE} ${PROJECT_LIBRARY_NAME})
+  # link the current test target with the same libs as the main project library
+  target_link_libraries(${_EXE} ${PROJECT_LIBRARY_NAME})
+  target_link_libraries(${_EXE} ${LIBS})
+  
+  # add test for ctest
+  add_test(${_EXE} ${_EXE})
+
+  set_tests_properties(${_EXE} PROPERTIES FAIL_REGULAR_EXPRESSION "FAILURE;Exception;failed;ERROR")
+  message("ADD MPI TESTS ...")
+  add_test(NAME mpi_${_EXE} COMMAND mpirun -np 8 ${_EXE})
+
+endforeach()
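+
+# A minimal usage sketch (hypothetical caller): the including CMakeLists.txt is
+# expected to define, for each test directory, something like
+#   set(_CURRENT_TEST_DIRECTORY testDir)
+#   set(_EXE_LIST_${_CURRENT_TEST_DIRECTORY} testMain)
+#   set(testMain_FSOURCES testMain.f90)
+# before this file is included.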
diff --git a/HySoP/CMake/InstallPackage.cmake b/HySoP/CMake/InstallPackage.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..a594a0b3d22f7b0c1440f065dbfcdc3f6c574012
--- /dev/null
+++ b/HySoP/CMake/InstallPackage.cmake
@@ -0,0 +1,70 @@
+#===========================================================
+# Macro to install parmes package
+#
+# F. Pérignon, LJK-CNRS, April 2011
+#
+#===========================================================
+
+macro(install_package)
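+  # Expected (positional) arguments:
+  #  ARGV0 : package name, used as prefix for the export/config files
+  #  ARGV1 : library target to install
+  #  ARGV2 : (optional) list of header files to install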
+  
+  if(${ARGC} GREATER 2) # note: if(ARGV2) is not reliable inside a macro
+    set(_HEADERS ${ARGV2})
+  endif()
+
+  # Offer the user the choice of overriding the installation directories
+  set(INSTALL_LIB_DIR lib CACHE PATH "Installation directory for libraries")
+  set(INSTALL_BIN_DIR bin CACHE PATH "Installation directory for executables")
+  set(INSTALL_INCLUDE_DIR include CACHE PATH "Installation directory for header files")
+  set(INSTALL_DATA_DIR share CACHE PATH "Installation directory for data files")
+
+  # Make relative paths absolute (needed later on)
+  foreach(p LIB BIN INCLUDE DATA)
+    set(var INSTALL_${p}_DIR)
+    if(NOT IS_ABSOLUTE "${${var}}")
+      set(${var} "${CMAKE_INSTALL_PREFIX}/${${var}}")
+    endif()
+  endforeach()
+  
+  # The library
+  install(TARGETS ${ARGV1} 
+    EXPORT ${ARGV0}LibraryDepends
+    ARCHIVE DESTINATION "${INSTALL_LIB_DIR}"  # static libs
+    LIBRARY DESTINATION "${INSTALL_LIB_DIR}"  COMPONENT shlib # shared libs
+    PUBLIC_HEADER DESTINATION "${INSTALL_INCLUDE_DIR}" COMPONENT dev
+    )
+  
+  # The headers and modules
+  if(_HEADERS)
+    install(FILES ${_HEADERS} DESTINATION "${INSTALL_INCLUDE_DIR}")
+  endif() 
+  install(DIRECTORY ${CMAKE_BINARY_DIR}/Modules DESTINATION "${INSTALL_INCLUDE_DIR}")
+  
+  export(TARGETS ${ARGV1} FILE "${PROJECT_BINARY_DIR}/InstallFiles/${ARGV0}LibraryDepends.cmake")
+  
+  # Install the export set for use with the install-tree
+  install(EXPORT ${ARGV0}LibraryDepends DESTINATION
+    "${INSTALL_DATA_DIR}/CMake" COMPONENT dev)
+  
+  set(${ARGV0}_INCLUDE_DIRS "${INSTALL_INCLUDE_DIR}")
+  set(${ARGV0}_LIB_DIR "${INSTALL_LIB_DIR}")
+  set(${ARGV0}_CMAKE_DIR "${INSTALL_DATA_DIR}/CMake")
+
+  display(${ARGV0}_CMAKE_DIR)
+  configure_file(${CMAKE_SOURCE_DIR}/${ARGV0}Config.cmake.in
+    "${PROJECT_BINARY_DIR}/InstallFiles/${ARGV0}Config.cmake")
+  configure_file(${CMAKE_SOURCE_DIR}/${ARGV0}ConfigVersion.cmake.in
+    "${PROJECT_BINARY_DIR}/InstallFiles/${ARGV0}ConfigVersion.cmake" @ONLY)
+  install(FILES
+    "${PROJECT_BINARY_DIR}/InstallFiles/${ARGV0}Config.cmake"
+    "${PROJECT_BINARY_DIR}/InstallFiles/${ARGV0}ConfigVersion.cmake"
+    DESTINATION "${${ARGV0}_CMAKE_DIR}" COMPONENT dev)
+  
+endmacro()
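+
+# A hypothetical invocation (sketch): for a package 'Parmes' providing the library
+# target 'parmes', with ${HEADERS} holding the headers to install:
+#   install_package(Parmes parmes "${HEADERS}")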
diff --git a/HySoP/CMake/ParmesTests.cmake b/HySoP/CMake/ParmesTests.cmake
index a054e26ed9884ccd492a47291a66bf323953bd66..e8164044d066f4e42a19ec65ba1c810b7d3b12fa 100644
--- a/HySoP/CMake/ParmesTests.cmake
+++ b/HySoP/CMake/ParmesTests.cmake
@@ -1,7 +1,27 @@
+# === Configuration for tests in Parmes ===
+#
+# --> collect test directories/files
+# --> create tests (ctest)
+#
+# These tests will be run after a call to 'make test'
+# or a call to ctest.
+
 enable_testing()
 find_python_module(pytest REQUIRED)
 
-# ---  We create a new test for each test_XXX.py found in each directory (i.e. module) of parmepy listed below ---
+# Use the python build dir as the directory where tests will be run.
+# --> get the python build dir
+execute_process(
+  COMMAND ${PYTHON_EXECUTABLE} -c "import distutils.util as ut ; import distutils.sysconfig as sy; print 'lib.'+ut.get_platform()+'-'+sy.get_python_version()"
+  OUTPUT_VARIABLE ${PROJECT_NAME}_PYTHON_BUILD_DIR)
+string(STRIP ${${PROJECT_NAME}_PYTHON_BUILD_DIR} ${PROJECT_NAME}_PYTHON_BUILD_DIR)
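+# (typically something like 'lib.linux-x86_64-2.7', depending on platform and python version)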
+
+# --> set test dir
+set(testDir ${CMAKE_BINARY_DIR}/build/${${PROJECT_NAME}_PYTHON_BUILD_DIR})
+# 1 - Collect files for Python unit tests (user-defined) -----
+
+# We create a new test for each test_XXX.py found in each directory (i.e. module) of parmepy listed below
 
 set(py_src_dirs
   fields
@@ -12,38 +31,33 @@ set(py_src_dirs
   tools
   )
 
+# If MPI is enabled, also collect the test_XXX.py files from the parmepy/mpi directory
 if(USE_MPI)
   set(py_src_dirs
     ${py_src_dirs} mpi)
 endif()
 
+# If GPU is enabled, also collect the test_XXX.py files from the parmepy/gpu directory
 if(WITH_GPU)
   set(py_src_dirs
     ${py_src_dirs} gpu)
 endif()
 
-# Set test dir to python build dir.
-set(testDir ${CMAKE_BINARY_DIR}/build/${${PROJECT_NAME}_PYTHON_BUILD_DIR})
-set(ENV{PYTHONPATH} ${testDir})
-
-display(testDir)
-
-## Copy the OpenCL sources files to build dir (only python files are copied by setup.py)
+# Copy the OpenCL source files to the build dir (required since only Python files are copied by setup.py)
 set(clfiles)
-file(GLOB clfilestmp RELATIVE ${CMAKE_SOURCE_DIR} parmepy/gpu/cl_src/*.cl)
+file(GLOB clfilestmp RELATIVE ${CMAKE_SOURCE_DIR} parmepy/gpu/cl_src/[a-z]*.cl)
 set(clfiles ${clfiles} ${clfilestmp})
-file(GLOB clfilestmp RELATIVE ${CMAKE_SOURCE_DIR} parmepy/gpu/cl_src/kernels/*.cl)
+file(GLOB clfilestmp RELATIVE ${CMAKE_SOURCE_DIR} parmepy/gpu/cl_src/kernels/[a-z]*.cl)
 set(clfiles ${clfiles} ${clfilestmp})
-file(GLOB clfilestmp RELATIVE ${CMAKE_SOURCE_DIR} parmepy/gpu/cl_src/advection/*.cl)
+file(GLOB clfilestmp RELATIVE ${CMAKE_SOURCE_DIR} parmepy/gpu/cl_src/advection/[a-z]*.cl)
 set(clfiles ${clfiles} ${clfilestmp})
-file(GLOB clfilestmp RELATIVE ${CMAKE_SOURCE_DIR} parmepy/gpu/cl_src/remeshing/*.cl)
+file(GLOB clfilestmp RELATIVE ${CMAKE_SOURCE_DIR} parmepy/gpu/cl_src/remeshing/[a-z]*.cl)
 set(clfiles ${clfiles} ${clfilestmp})
 foreach(_F ${clfiles})
   configure_file(${_F} ${testDir}/${_F} COPYONLY)
 endforeach()
 
-
-## Build a list of test_*.py files for each directory of parmepy/${py_src_dirs}
+# Build a list of test_*.py files for each directory of parmepy/${py_src_dirs}
 set(py_test_files)
 foreach(testdir ${py_src_dirs})
   file(GLOB testfiles RELATIVE ${CMAKE_SOURCE_DIR} parmepy/${testdir}/tests/test_*.py)
@@ -51,15 +65,19 @@ foreach(testdir ${py_src_dirs})
   # copy data files
   file(GLOB datfiles parmepy/${testdir}/tests/*.dat)
   file(GLOB mapfiles parmepy/${testdir}/tests/*.map)
+  file(GLOB reffiles parmepy/${testdir}/tests/ref_files/*)
   set(datafiles ${mapfiles} ${datfiles})
+  file(COPY ${reffiles} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/dataForTests)
   foreach(_F ${datafiles})
     get_filename_component(fname ${_F} NAME)
     configure_file(${_F} ${CMAKE_CURRENT_BINARY_DIR}/dataForTests/${fname} COPYONLY)
   endforeach()
 endforeach()
 
-## Handling doctest in *.py files recursively for each directory of parmepy/${py_src_dirs}
-## that names are not __init__ or test_ and that contains '>>>'
+# 2 - Collect files for Python doctests -----
+# Handle doctests in *.py files, recursively, for each directory of parmepy/${py_src_dirs},
+# excluding __init__ and test_ files.
+# Doctests are collected from every file that contains '>>>'.
 set(py_doctest_files)
 foreach(testdir ${py_src_dirs})
   file(GLOB testfiles parmepy/${testdir}/[a-zA-Z]*.py)
@@ -71,32 +89,39 @@ foreach(testdir ${py_src_dirs})
   endforeach()
 endforeach()
 
-## Adding tests
+# 3 - Create tests for all collected files -----
 message(STATUS "=== TESTS === ")
-## Add test_*.py files
 foreach(testfile ${py_test_files})
   get_filename_component(testName ${testfile} NAME_WE)
   set(testExe ${testDir}/${testfile})
   message(STATUS "Add test ${testfile} ...")
   if(FULL_TEST)
-    add_test(NAME ${testName} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/dataForTests COMMAND py.test -v --pep8 ${testExe})
+    add_test(NAME ${testName} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/dataForTests
+     COMMAND py.test -v --pep8 ${testExe})
   else()
-    add_test(NAME ${testName} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/dataForTests COMMAND py.test -v ${testExe})
+    add_test(NAME ${testName} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/dataForTests
+    COMMAND py.test -v ${testExe})
   endif()
-
+
+  # Run the same test in a multi-process MPI run.
+  # The number of processes is set with the NBPROCS_FOR_TESTS variable (user option for cmake, default = 8).
   if(WITH_MPI_TESTS)
-    message(STATUS "Add mpi test mpi_${testName} ${NBPROCS_FOR_TESTS}")
-    add_test(NAME mpi_${testName} COMMAND mpirun -np ${NBPROCS_FOR_TESTS} ${PYTHON_EXECUTABLE} ${testExe})
+    message(STATUS "Add test mpi_${testName}.")
+    add_test(
+      NAME mpi_${testName}
+      WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/dataForTests
+      COMMAND mpirun -np ${NBPROCS_FOR_TESTS} ${PYTHON_EXECUTABLE} ${testExe}
+      )
+    set_tests_properties(mpi_${testName}
+      PROPERTIES ENVIRONMENT "PYTHONPATH=${testDir}")
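+    # (hint: this test alone can be run with 'ctest -R mpi_<testName>')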
   endif()
 endforeach()
-# Add files containing doctests
 
+# Add files containing doctests
 foreach(testfile ${py_doctest_files})
   get_filename_component(testName ${testfile} NAME_WE)
   message(STATUS "Add test from doc doctest_${testName} ...")
   add_test("doctest_${testName}" py.test -v --doctest-modules ${testfile})
 endforeach()
 message(STATUS "===")
-
-
-#configure_file(conftest.py.in conftest.py)
\ No newline at end of file
diff --git a/HySoP/CMake/PythonSetup.cmake b/HySoP/CMake/PythonSetup.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..4a6daf4091a853f46d3b0ee57468327c7b5edd95
--- /dev/null
+++ b/HySoP/CMake/PythonSetup.cmake
@@ -0,0 +1,25 @@
+# Find the python interpreter and libraries
+# and check that their versions match.
+
+find_package(PythonInterp)
+get_filename_component(PYTHON_DIR ${PYTHON_EXECUTABLE} REALPATH)
+
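+# Note: get_filename_component(... DIRECTORY) requires cmake >= 2.8.12;
+# with older versions we fall back to REALPATH on the parent directory.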
+if(CMAKE_PATCH_VERSION LESS 12)
+  set(PYTHON_DIR ${PYTHON_DIR}/..)
+  get_filename_component(PYTHON_DIR ${PYTHON_DIR} REALPATH)
+else()
+  get_filename_component(PYTHON_DIR ${PYTHON_DIR} DIRECTORY)
+endif()
+
+set(PYTHON_main_version ${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR})
+set(PythonLibs_FIND_VERSION ${PYTHON_VERSION_STRING})
+set(PYTHON_INCLUDE_DIR ${PYTHON_DIR}/../include/python${PYTHON_main_version})
+find_package(PythonLibs)
+if(NOT PYTHONLIBS_VERSION_STRING VERSION_EQUAL PYTHON_VERSION_STRING)
+  display(PYTHONLIBS_VERSION_STRING)
+  display(PYTHON_VERSION_STRING)
+  message(FATAL_ERROR "Python library and executable versions do not match.")
+endif()
+include(FindPythonModule)
diff --git a/HySoP/CMake/TestFortranAcceptsFlag.cmake b/HySoP/CMake/TestFortranAcceptsFlag.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..73f3b1a1d4cd2d1612bc7203e8987f0ef59c46d0
--- /dev/null
+++ b/HySoP/CMake/TestFortranAcceptsFlag.cmake
@@ -0,0 +1,45 @@
+# - Test Fortran compiler for a flag
+# Check if the Fortran compiler accepts a flag
+#
+#  Macro CHECK_Fortran_ACCEPTS_FLAG(FLAGS VARIABLE) -
+#     checks whether the Fortran compiler accepts the given flag(s)
+#  FLAGS - the flags to try
+#  VARIABLE - variable to store the result
+#
+# F. Pérignon - LJK/CNRS - March 2011 
+# From Kitware TestCXXAcceptsFlag.cmake 
+#
+
+MACRO(CHECK_Fortran_ACCEPTS_FLAG FLAGS  VARIABLE)
+  IF(NOT DEFINED ${VARIABLE})
+    MESSAGE(STATUS "Checking to see if Fortran compiler accepts flag ${FLAGS}")
+    FILE(WRITE  ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/Dummy.f 
+      "program TESTFortran
+        implicit none
+        print *, 'Hello'
+        end program  ")
+    TRY_COMPILE(${VARIABLE}
+      ${CMAKE_BINARY_DIR}
+      ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/Dummy.f
+      CMAKE_FLAGS -DCOMPILE_DEFINITIONS:STRING=${FLAGS}
+      OUTPUT_VARIABLE OUTPUT) 
+    IF(${VARIABLE})
+      MESSAGE(STATUS "Checking to see if Fortran compiler accepts flag ${FLAGS} - yes")
+      FILE(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log
+        "Determining if the Fortran compiler accepts the flag ${FLAGS} passed with "
+        "the following output:\n${OUTPUT}\n\n")
+    ELSE(${VARIABLE})
+      MESSAGE(STATUS "Checking to see if Fortran compiler accepts flag ${FLAGS} - no")
+      FILE(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log
+        "Determining if the Fortran compiler accepts the flag ${FLAGS} failed with "
+        "the following output:\n${OUTPUT}\n\n")
+    ENDIF(${VARIABLE})
+  ENDIF(NOT DEFINED ${VARIABLE})
+ENDMACRO(CHECK_Fortran_ACCEPTS_FLAG)
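+
+# A minimal usage sketch (the flag below is only an example):
+#   INCLUDE(TestFortranAcceptsFlag)
+#   CHECK_Fortran_ACCEPTS_FLAG("-fdefault-real-8" FORTRAN_ACCEPTS_REAL8)
+#   IF(FORTRAN_ACCEPTS_REAL8)
+#     SET(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -fdefault-real-8")
+#   ENDIF()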
diff --git a/HySoP/CMakeLists.txt b/HySoP/CMakeLists.txt
index febe4a7d264e2a15bf26bd6c1432f751a3de5f8a..e511de777a4ac71ee02cd96d96f526335edfa085 100644
--- a/HySoP/CMakeLists.txt
+++ b/HySoP/CMakeLists.txt
@@ -3,13 +3,10 @@
 #
 # It includes :
 #  - high level python interface to parmes routines
-#  - parmes fortran library (libparmes...)
-#  - parmesscales (fortran library) particular solver from scales, (libparmesscales...)
+#  - parmes fortran library (with fftw solver and scales interface)
 #
 #  parmes depends (optionally) on :
-#   - parmesscales (WITH_SCALES=ON, default)
-#   - ppm-core (USE_PPM=ON, default=OFF)
-#   - ppm-numerics (USE_PPMNumerics, default=OFF)
+#   - scales (WITH_SCALES=ON, default)
 #
 # LJK-CNRS, F. Pérignon, june 2012
 #
@@ -17,6 +14,10 @@
 # ============= Global cmake Settings =============
 # Set minimum version for cmake
 cmake_minimum_required(VERSION 2.8.7)
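+# CMP0042 (MACOSX_RPATH): keep the OLD (pre-cmake-3.0) behaviour.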
+if(POLICY CMP0042)
+  cmake_policy(SET CMP0042 OLD)
+endif()
 # Set cmake modules directory (i.e. the one which contains all user-defined FindXXX.cmake files among other things)
 set(CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/CMake)
 # Force out-of-source build
@@ -28,10 +28,8 @@ include(MyTools)
 option(VERBOSE_MODE "enable verbose mode for cmake exec. Default = on" ON)
 option(DOUBLEPREC "set precision for real numbers to double precision when this mode is enable. Default = on." ON)
 option(USE_MPI "compile and link parmes with mpi when this mode is enable. Default = on." ON)
-option(WITH_PPM "link Parmes with PPM library (core component) - Deprecated. Default = off." OFF)
-option(WITH_PPMNumerics "link Parmes with PPM-numerics - Deprecated. Default = off" OFF)
 option(WITH_TESTS "Enable testing. Default = off" ON)
-option(BUILD_SHARED_LIBS "Enable dynamic library build, default = OFF." ON)
+option(BUILD_SHARED_LIBS "Enable dynamic library build, default = ON." ON)
 option(WITH_LIB_FORTRAN "Generate libparmes from fortran files in src, wrapped into parmepy.f2py module. Default = ON." ON)
 option(WITH_SCALES "compile/create parmesscales lib and link it with Parmes. Default = ON." ON)
 option(WITH_FFTW "Link with fftw library (required for some Parmes solvers), default = ON" ON)
@@ -42,23 +40,19 @@ option(FULL_TEST "Enable all test options (pep8, mpi ...) - Default = OFF" OFF)
 option(PROFILE "Enable profiling mode for Parmes. 0:disabled, 1: enabled. Default = 0" 0)
 option(OPTIM "To allow python -OO run, some packages must be deactivated. Set this option to 'ON' to do so. Default = OFF" OFF)
 option(WITH_MPI_TESTS "Enable mpi tests. Default = ON if USE_MPI is ON." ON)
+
 if(NOT WITH_LIB_FORTRAN)
   message(WARNING "You deactivate libparmes (fortran) generation. This will disable fftw and scales fonctionnalities.")
   set(WITH_FFTW "OFF")
   set(WITH_SCALES "OFF")
   set(WITH_MAIN_FORTRAN "OFF")
 endif()
-set(NBPROCS_FOR_TESTS 4)
 
 # We can not run scales or fftw without mpi ...
 if(WITH_FFTW OR WITH_SCALES)
   set(USE_MPI "ON")
 endif()
 
-if(NOT USE_MPI)
-  set(WITH_MPI_TESTS "OFF")
-endif()
-
 # cmake project name
 set(PROJECT_NAME parmepy)
 # --- Name for the package ---
@@ -69,7 +63,6 @@ set(PACKAGE_VERSION 1.0.0)
 set(${PYPACKAGE_NAME}_version ${PACKAGE_VERSION})
 # --- The name (without extension) of the lib to be created ---
 set(PROJECT_LIBRARY_NAME ${PROJECT_NAME})
-
 # ============= The project =============
 # Set project name and project languages
 # => this automatically defines:
@@ -86,144 +79,64 @@ set(PARMES_LIBRARY_NAME parmes)
 set(PACKAGE_NAME Parmes)
 
 # --- Python ---
-find_package(PythonInterp)
-get_filename_component(PYTHON_DIR ${PYTHON_EXECUTABLE} REALPATH)
-
-if(CMAKE_PATCH_VERSION LESS 12)
-  set(PYTHON_DIR ${PYTHON_DIR}/..)
-  get_filename_component(PYTHON_DIR ${PYTHON_DIR} REALPATH)
-else()
-  get_filename_component(PYTHON_DIR ${PYTHON_DIR} DIRECTORY)
-endif()
-
-set(PYTHON_main_version ${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR})
-set(PythonLibs_FIND_VERSION ${PYTHON_VERSION_STRING})
-set(PYTHON_INCLUDE_DIR ${PYTHON_DIR}/../include/python${PYTHON_main_version})
-find_package(PythonLibs)
-if(NOT PYTHONLIBS_VERSION_STRING VERSION_EQUAL PYTHON_VERSION_STRING)
-  display(PYTHONLIBS_VERSION_STRING)
-  display(PYTHON_VERSION_STRING)
-  message(FATAL_ERROR "Python library and executable versions do not match.")
-endif()
-include(FindPythonModule)
+# - Global setup (interp and lib) -
+include(PythonSetup)
+# - python packages -
 find_python_module(numpy REQUIRED)
+find_python_module(scipy REQUIRED)
 find_python_module(matplotlib)
 if(NOT matplotlib_FOUND)
   find_python_module(Gnuplot REQUIRED)
 endif()
 find_python_module(scitools)
 find_python_module(h5py REQUIRED)
-#find_python_module(evtk)
-
-## About install :
-# if :
-# 1 - cmake ... ==> try to install in site.USER_SITE of python (equivalent to python setup.py install --user)
-#  if you are using virtualenv, USER_SITE is not enable and install will be done in site-package of the virtualenv. 
-# 2 - cmake -DCMAKE_INSTALL_PREFIX = some_path ==> install in some_path as base (equivalent to python setup.py install --prefix=<path>)
-# Note that case 2 require a proper set of PYTHONPATH environment var for parmepy to work.
-# 
-#if(NOT first_run)
-#  message(STATUS " THIS IS THE FIRST RUN !!!!!! ")
-#  set(first_run 1 CACHE INTERNAL "True if first cmake run")
-#endif()
-
-if(NOT CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT AND NOT first_run)
-    set(first_run 1 CACHE INTERNAL "True if first cmake run")
-  
-    # if 'CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT' is false, it means
-    # that -DCMAKE_INSTALL_PREFIX=something has been given to cmake.
-
-    # A prefix is given by user. installation in PREFIX (as python --prefix=<PREFIX>).
-    set(install-opt "--prefix=${CMAKE_INSTALL_PREFIX}")
-    # Need to set the CMAKE_INSTALL_PREFIX to the proper site (subdirectory of PREFIX)
-    # Get python install suffix (~> lib/python/site-package) from numpy package
-    set(PYTHON_COMMAND_GET_INSTALL_DIR "import os, re, numpy; print os.path.join(os.path.join(\"${CMAKE_INSTALL_PREFIX}\",*re.compile('/numpy/__init__.py.*').sub('',numpy.__file__).split('/')[-3:]), \"${PROJECT_NAME}\")")
-else()
-    set(first_run 1 CACHE INTERNAL "True if first cmake run")
-    # First, we need to check if '--user' option works in the current environment.
-    execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import site; print site.ENABLE_USER_SITE" OUTPUT_VARIABLE ENABLE_USER)
-    string(STRIP ${ENABLE_USER} ENABLE_USER)
-    if(ENABLE_USER)
-        # Default prefix (as python --user) -> installation in site.USER_SITE
-	# Note FP : for brew users, you need to set prefix= nothing in file /usr/local/Cellar/python/2.7.6_1/Frameworks/Python.framework/Versions/2.7/lib/python2.7/distutils/distutils.cfg
-	# or create a ~/.pydistutils.cfg  with:
-	# [install]
-	# prefix=
-	set(install-opt "--user")
-	# Need to set the CMAKE_INSTALL_PREFIX to site.USER_SITE
-	# Get python user site and install path = USER_SITE + project_name
-	set(PYTHON_COMMAND_GET_INSTALL_DIR "import site, os ; print os.path.join(site.USER_SITE, \"${PROJECT_NAME}\")")
-    else() # user site not included in the path, which probably means that python is run using virtualenv
-        # Command to find 'global' site-package
-	set(GET_SITE_PACKAGE "from distutils.sysconfig import get_python_lib; print(get_python_lib())")
-	execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "${GET_SITE_PACKAGE}" OUTPUT_VARIABLE GLOBAL_SITE_PACKAGE)
-	string(STRIP ${GLOBAL_SITE_PACKAGE} GLOBAL_SITE_PACKAGE)
-	set(PYTHON_COMMAND_GET_INSTALL_DIR ${GET_SITE_PACKAGE})
-	set(install-opt "")
-    endif()
-endif()
-
-#
-#Get build dir for python
-execute_process(
-  COMMAND ${PYTHON_EXECUTABLE} -c "import distutils.util as ut ; import distutils.sysconfig as sy; print 'lib.'+ut.get_platform()+'-'+sy.get_python_version()"
-  OUTPUT_VARIABLE ${PROJECT_NAME}_PYTHON_BUILD_DIR)
-string(STRIP ${${PROJECT_NAME}_PYTHON_BUILD_DIR} ${PROJECT_NAME}_PYTHON_BUILD_DIR)
-# Target to remove parmepy from install-path.
-# Set the CMAKE_INSTALL_DIR to the proper path
-execute_process(
-       COMMAND ${PYTHON_EXECUTABLE} -c "${PYTHON_COMMAND_GET_INSTALL_DIR}"
-       OUTPUT_VARIABLE ${PROJECT_NAME}_INSTALL_DIR)
-string(STRIP ${${PROJECT_NAME}_INSTALL_DIR} ${PROJECT_NAME}_INSTALL_DIR)
-set(CMAKE_INSTALL_PREFIX ${${PROJECT_NAME}_INSTALL_DIR} CACHE PATH "default install path" FORCE)
-if(CMAKE_PATCH_VERSION LESS 12)
-  set(dirToBeRemoved ${CMAKE_INSTALL_PREFIX}/..)
-  get_filename_component(dirToBeRemoved ${dirToBeRemoved} REALPATH)
-else()
-  set(dirToBeRemoved ${CMAKE_INSTALL_PREFIX})
-  get_filename_component(dirToBeRemoved ${dirToBeRemoved} DIRECTORY)
-endif()
-display(dirToBeRemoved)
-add_custom_target(uninstall COMMAND rm -rf ${dirToBeRemoved}/${PYPACKAGE_NAME}*
-  COMMENT "Remove ${dirToBeRemoved}/${PYPACKAGE_NAME} directory (parmepy package and its dependencies)")
-
-
+find_python_module(sympy REQUIRED)
 # --- OpenCL ---
 find_python_module(pyopencl REQUIRED)
+# --- MPI ---
+if(USE_MPI)
+  find_python_module(mpi4py REQUIRED)
+endif()
 
-find_python_module(sympy REQUIRED)
+# ========= Check which opencl devices are available on the system =========
 if(WITH_GPU)
-execute_process(
+  execute_process(
     COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_SOURCE_DIR}/opencl_explore.py "EXPLORE")
-execute_process(
+  execute_process(
     COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_SOURCE_DIR}/opencl_explore.py
     OUTPUT_VARIABLE OPENCL_DEFAULT_OPENCL_ID)
 else()
-execute_process(
+  execute_process(
     COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_SOURCE_DIR}/opencl_explore.py "EXPLORE" CPU)
-execute_process(
+  execute_process(
     COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_SOURCE_DIR}/opencl_explore.py CPU
     OUTPUT_VARIABLE OPENCL_DEFAULT_OPENCL_ID)
 endif()
+
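+# opencl_explore.py prints the default ids as "platform_id device_id"; split them into a list.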
 string(REPLACE " " ";" MY_LIST ${OPENCL_DEFAULT_OPENCL_ID})
 list(GET MY_LIST 0 OPENCL_DEFAULT_OPENCL_PLATFORM_ID)
 list(GET MY_LIST 1 OPENCL_DEFAULT_OPENCL_DEVICE_ID)
 display(OPENCL_DEFAULT_OPENCL_PLATFORM_ID)
 display(OPENCL_DEFAULT_OPENCL_DEVICE_ID)
 
+#  ====== Create (and setup) install/uninstall targets ======
+#
+# --> set installation dir
+# --> set options for python install
+# --> create install/uninstall targets
 
-# --- MPI ---
+include(ParmesInstallSetup)
+# Remark: this must be done before the add_subdirectory below, since the install
+# process in src needs CMAKE_INSTALL_PREFIX to be properly set.
 
-if(USE_MPI)
-  find_python_module(mpi4py REQUIRED)
-endif()
-
-# Deal with src/files to create libparmes and related.
+# ====== Create non-python (fortran) libraries (fftw and scales interfaces), if required =====
 if(WITH_LIB_FORTRAN)
   add_subdirectory(src)
 endif()
 
-# ============= Generates setup.py =============
+# ====== Generate python files required for the build/install process ======
+
 # The file setup.py will be generated from setup.py.in.
 if(EXISTS ${CMAKE_SOURCE_DIR}/setup.py.in)
   message(STATUS "Generate setup.py file ...")
@@ -237,19 +149,22 @@ if(EXISTS ${CMAKE_SOURCE_DIR}/parmepy/__init__.py.in)
   configure_file(parmepy/__init__.py.in ${CMAKE_SOURCE_DIR}/parmepy/__init__.py)
 endif()
 
+# ====== Create (and setup) build target ======
 set_directory_properties(PROPERTIES ADDITIONAL_MAKE_CLEAN_FILES ${CMAKE_BINARY_DIR}/build)
 
-add_custom_target(python-build ALL COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/setup.py build config_fc --f90exec=${CMAKE_Fortran_COMPILER}
-   WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} COMMENT "build parmepy package")
-
-# To install python package AND parmes library and modules
-add_custom_target(python-install COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/setup.py install ${install-opt} config_fc --f90exec=${CMAKE_Fortran_COMPILER}
-  #COMMAND make install
-  WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} COMMENT "build/install parmepy package")
+if(WITH_LIB_FORTRAN)
+  add_custom_target(python-build ALL 
+    COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/setup.py build config_fc --f90exec=${CMAKE_Fortran_COMPILER}
+    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} COMMENT "build parmepy package")
+  add_dependencies(python-build ${PARMES_LIBRARY_NAME})
+else()
+  add_custom_target(python-build ALL 
+    COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/setup.py build
+    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} COMMENT "build parmepy package")
+endif()
 
-install(CODE "execute_process(COMMAND ${CMAKE_BUILD_TOOL} python-install WORKING_DIRECTORY \"${CMAKE_CURRENT_BINARY_DIR}\")")
+# ====== Create a Target to clean sources (remove .pyc files) and build dir ======
 
-# Target to clean sources (remove .pyc files) and build dir.
 file(GLOB_RECURSE PYCFILES "${CMAKE_SOURCE_DIR}/*.pyc")
 add_custom_target(pyclean COMMAND rm -f ${PYCFILES}
   COMMAND make clean
@@ -257,10 +172,13 @@ add_custom_target(pyclean COMMAND rm -f ${PYCFILES}
   COMMAND rm ${CMAKE_SOURCE_DIR}/parmepy/__init__.py
   COMMENT "clean parmepy sources and build.")
 
-# Target to generate doxygen documentation
-# Documentation generation
+# ====== Create a Target to generate the documentation ======
 find_package(Doxygen)
 if(DOXYGEN_FOUND)
+  find_file(DOXY NAMES doxypy.py PATHS ENV PATH)
+  if(NOT DOXY)
+    message(STATUS "Warning: doxypy seems to be missing on your system. You may not be able to properly generate the documentation.")
+  endif()
   configure_file(${CMAKE_SOURCE_DIR}/DoxyConf/parmes.doxyfile.in ${CMAKE_BINARY_DIR}/DoxyConf/parmes.doxyfile)
   add_custom_target(doc COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_BINARY_DIR}/DoxyConf/parmes.doxyfile
   COMMENT "Generate parmepy documentation using doxygen.")
@@ -270,33 +188,13 @@ else()
   COMMENT "Generate parmepy documentation using doxygen.")
 endif()
 
-
-if(WITH_LIB_FORTRAN)
-  add_dependencies(python-build ${PARMES_LIBRARY_NAME})
-  add_dependencies(python-install ${PARMES_LIBRARY_NAME})
-endif()
-
-
-file(GLOB _DIR_FILES_EXT ${_DIR}/${_EXT})
-
-
-# ============= RPATH =============
-# Concerning rpath see for example http://www.itk.org/Wiki/CMake_RPATH_handling
-
-# --------------------------------------------
-# do not skip the full RPATH for the build tree
-set(CMAKE_SKIP_BUILD_RPATH FALSE)
-# when building, don't use the install RPATH already
-# (but later on when installing)
-set(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE)
-# the RPATH to be used when installing
-set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib")
-# add the automatically determined parts of the RPATH
-# which point to directories outside the build tree to the install RPATH
-set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
-
 # ============= Tests =============
 if(WITH_TESTS)
+  # Number of MPI processes used to run the tests.
+  set(NBPROCS_FOR_TESTS 8)
+  if(NOT USE_MPI)
+    set(WITH_MPI_TESTS "OFF")
+  endif()
   include(ParmesTests)
 endif(WITH_TESTS)
 
@@ -318,7 +216,6 @@ if(VERBOSE_MODE)
   message(STATUS " Project uses Scales : ${WITH_SCALES}")
   message(STATUS " Project uses FFTW : ${WITH_FFTW}")
   message(STATUS " Project uses GPU : ${WITH_GPU}")
-  message(STATUS " Project will be built in build/${${PROJECT_NAME}_PYTHON_BUILD_DIR}.")
   message(STATUS " ${PROJECT_NAME} debug mode : ${DEBUG}")
   message(STATUS " Enable -OO run? : ${OPTIM}")
   message(STATUS "====================== ======= ======================")
@@ -327,8 +224,11 @@ if(VERBOSE_MODE)
   message(STATUS " 'make -jN' to build the project, N being the number of available processes.")
   message(STATUS " 'make install' to install python modules and their dependencies. ")
   message(STATUS " 'make doc' to generate doxygen documentation for parmepy.")
-  message(STATUS " 'make test' to run some test (after the build!).")
+  message(STATUS " 'make test' to run some test (after the build! Do not use -j with this target).")
   message(STATUS " 'make clean' to clean build directory.")
   message(STATUS " 'make uninstall' to clean install directory. Dry-run (make -n uninstall) is advisable to check what will really be deleted.")
+  message(STATUS "\n\n/!\\ Warning /!\\ : depending on your python environment configuration, you may need to set PYTHONPATH.")
+  message("Try to run python -c 'import parmepy'. If it fails, add ${${PROJECT_NAME}_PYTHONPATH} to PYTHONPATH environment variable.")
+  message("Example : \n export PYTHONPATH=${${PROJECT_NAME}_PYTHONPATH}:\${PYTHONPATH}\n")
 endif()
 
diff --git a/HySoP/DoxyConf/parmes.doxyfile.in.bak b/HySoP/DoxyConf/parmes.doxyfile.in.bak
new file mode 100644
index 0000000000000000000000000000000000000000..64f0f003cfd2bea031613822432481764670ad86
--- /dev/null
+++ b/HySoP/DoxyConf/parmes.doxyfile.in.bak
@@ -0,0 +1,1809 @@
+# Doxyfile 1.8.2
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+#       TAG = value [value, ...]
+# For lists items can also be appended using:
+#       TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# http://www.gnu.org/software/libiconv for the list of possible encodings.
+
+DOXYFILE_ENCODING      = UTF-8
+
+# The PROJECT_NAME tag is a single word (or sequence of words) that should
+# identify the project. Note that if you do not use Doxywizard you need
+# to put quotes around the project name if it contains spaces.
+
+PROJECT_NAME           = @PROJECT_NAME@
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER         = @PACKAGE_VERSION@
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer
+# a quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF          = "Particle Methods simulation on hybrid architectures"
+
+# With the PROJECT_LOGO tag one can specify a logo or icon that is
+# included in the documentation. The maximum height of the logo should not
+# exceed 55 pixels and the maximum width should not exceed 200 pixels.
+# Doxygen will copy the logo to the output directory.
+
+PROJECT_LOGO           =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY       = @CMAKE_BINARY_DIR@/DoxygenGeneratedDoc
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS         = YES
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
+# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
+# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
+# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
+# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak,
+# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
+
+OUTPUT_LANGUAGE        = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC      = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF           = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF       =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC    = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB  = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before files name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES        = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip. Note that you specify absolute paths here, but also
+# relative paths, which will be relative from the directory where doxygen is
+# started.
+
+STRIP_FROM_PATH        =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH    =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES            = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like regular Qt-style comments
+# (thus requiring an explicit @brief command for a brief description.)
+
+JAVADOC_AUTOBRIEF      = YES
+
+# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
+# interpret the first line (until the first dot) of a Qt-style
+# comment as the brief description. If set to NO, the comments
+# will behave just like regular Qt-style comments (thus requiring
+# an explicit \brief command for a brief description.)
+
+QT_AUTOBRIEF           = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS           = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES  = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE               = 8
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES                =
+
+# This tag can be used to specify a number of word-keyword mappings (TCL only).
+# A mapping has the form "name=value". For example adding
+# "class=itcl::class" will allow you to use the command class in the
+# itcl::class meaning.
+
+TCL_SUBST              =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C  = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored for
+# Java. For instance, namespaces will be presented as packages, qualified
+# scopes will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA   = YES
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources only. Doxygen will then generate output that is more tailored for
+# Fortran.
+
+OPTIMIZE_FOR_FORTRAN   = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for
+# VHDL.
+
+OPTIMIZE_OUTPUT_VHDL   = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension,
+# and language is one of the parsers supported by doxygen: IDL, Java,
+# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C,
+# C++. For instance to make doxygen treat .inc files as Fortran files (default
+# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note
+# that for custom extensions you also need to set FILE_PATTERNS otherwise the
+# files are not read by doxygen.
+
+EXTENSION_MAPPING      =
+
+# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all
+# comments according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you
+# can mix doxygen, HTML, and XML commands with Markdown formatting.
+# Disable only in case of backward compatibilities issues.
+
+MARKDOWN_SUPPORT       = NO
+
+# When enabled doxygen tries to link words that correspond to documented classes,
+# or namespaces to their corresponding documentation. Such a link can be
+# prevented in individual cases by by putting a % sign in front of the word or
+# globally by setting AUTOLINK_SUPPORT to NO.
+
+AUTOLINK_SUPPORT       = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
+# func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT    = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+
+CPP_CLI_SUPPORT        = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
+# Doxygen will parse them like normal C++ but will assume all classes use public
+# instead of private inheritance when no explicit protection keyword is present.
+
+SIP_SUPPORT            = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES (the
+# default) will make doxygen replace the get and set methods by a property in
+# the documentation. This will only work if the methods are indeed getting or
+# setting a simple type. If this is not the case, or you want to show the
+# methods anyway, you should set this option to NO.
+
+IDL_PROPERTY_SUPPORT   = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC   = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING            = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and
+# unions are shown inside the group in which they are included (e.g. using
+# @ingroup) instead of on a separate page (for HTML and Man pages) or
+# section (for LaTeX and RTF).
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and
+# unions with only public data fields will be shown inline in the documentation
+# of the scope in which they are defined (i.e. file, namespace, or group
+# documentation), provided this scope is documented. If set to NO (the default),
+# structs, classes, and unions are shown on a separate page (for HTML and Man
+# pages) or section (for LaTeX and RTF).
+
+INLINE_SIMPLE_STRUCTS  = NO
+
+# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
+# is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically
+# be useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+
+TYPEDEF_HIDES_STRUCT   = NO
+
+# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
+# determine which symbols to keep in memory and which to flush to disk.
+# When the cache is full, less often used symbols will be written to disk.
+# For small to medium size projects (<1000 input files) the default value is
+# probably good enough. For larger projects a too small cache size can cause
+# doxygen to be busy swapping symbols to and from disk most of the time
+# causing a significant performance penalty.
+# If the system has enough physical memory increasing the cache will improve the
+# performance by keeping more symbols in memory. Note that the value works on
+# a logarithmic scale so increasing the size by one will roughly double the
+# memory usage. The cache size is given by this formula:
+# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
+# corresponding to a cache size of 2^16 = 65536 symbols.
+
+SYMBOL_CACHE_SIZE      = 0
+
+# Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be
+# set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given
+# their name and scope. Since this can be an expensive process and often the
+# same symbol appear multiple times in the code, doxygen keeps a cache of
+# pre-resolved symbols. If the cache is too small doxygen will become slower.
+# If the cache is too large, memory is wasted. The cache size is given by this
+# formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0,
+# corresponding to a cache size of 2^16 = 65536 symbols.
+
+LOOKUP_CACHE_SIZE      = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL            = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE        = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
+# scope will be included in the documentation.
+
+EXTRACT_PACKAGE        = YES
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC         = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES  = YES
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS  = YES
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base
+# name of the file that contains the anonymous namespace. By default
+# anonymous namespaces are hidden.
+
+EXTRACT_ANON_NSPACES   = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS     = YES
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES     = YES
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS  = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS      = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS          = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES       = NO
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES       = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES     = YES
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
+# will list include files with double quotes in the documentation
+# rather than with sharp brackets.
+
+FORCE_LOCAL_INCLUDES   = NO
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO            = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS       = NO
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS        = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
+# will sort the (brief and detailed) documentation of class members so that
+# constructors and destructors are listed first. If set to NO (the default)
+# the constructors will appear in the respective orders defined by
+# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
+# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
+# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
+
+SORT_MEMBERS_CTORS_1ST = YES
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
+# hierarchy of group names into alphabetical order. If set to NO (the default)
+# the group names will appear in their defined order.
+
+SORT_GROUP_NAMES       = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME     = YES
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to
+# do proper type resolution of all parameters of a function it will reject a
+# match between the prototype and the implementation of a member function even
+# if there is only one candidate or it is obvious which candidate to choose
+# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
+# will still accept a match between prototype and implementation in such cases.
+
+STRICT_PROTO_MATCHING  = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST      = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST      = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST       = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS       =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or macro consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and macros in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES  = 29
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES        = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
+# This will remove the Files entry from the Quick Index and from the
+# Folder Tree View (if specified). The default is YES.
+
+SHOW_FILES             = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
+# Namespaces page.  This will remove the Namespaces entry from the Quick Index
+# and from the Folder Tree View (if specified). The default is YES.
+
+SHOW_NAMESPACES        = NO
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER    =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option.
+# You can optionally specify a file name after the option, if omitted
+# DoxygenLayout.xml will be used as the name of the layout file.
+
+LAYOUT_FILE            =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files
+# containing the references data. This must be a list of .bib files. The
+# .bib extension is automatically appended if omitted. Using this command
+# requires the bibtex tool to be installed. See also
+# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style
+# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this
+# feature you need bibtex and perl available in the search path.
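+# For example (hypothetical file and key), setting CITE_BIB_FILES = refs
+# would let a \cite cottet2000vortex command resolve against refs.bib.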
+
+CITE_BIB_FILES         =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET                  = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS               = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED   = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR      = YES
+
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC       = YES
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
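+# With the format below a typical warning would read (illustrative path and
+# message): parmepy/fields/continuous.py:42: missing parameter documentation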
+
+WARN_FORMAT            = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE           =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT                  = @CMAKE_SOURCE_DIR@/parmepy \
+                         @CMAKE_SOURCE_DIR@/DoxyConf/mainpage.doxygen \
+                         @CMAKE_SOURCE_DIR@/src/fftw
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
+# also the default input encoding. Doxygen uses libiconv (or the iconv built
+# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
+# the list of possible encodings.
+
+INPUT_ENCODING         = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
+# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
+# *.f90 *.f *.for *.vhd *.vhdl
+
+FILE_PATTERNS          = *.doxygen \
+                         *.py \
+                         *.cl \
+                         *.f90
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE              = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE                =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS       = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS       = */.svn/* \
+                         */tests/*
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS        =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH           =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS       = *
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE      = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH             =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output.  If FILTER_PATTERNS is specified, this tag will be
+# ignored.
+
+INPUT_FILTER           =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis.  Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match.  The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty or if
+# none of the patterns matches the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS        = *.py=/usr/local/bin/doxypy.py
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES    = YES
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERNS (if any)
+# and it is also possible to disable source filtering for a specific pattern
+# using *.ext= (so without naming a filter). This option only has effect when
+# FILTER_SOURCE_FILES is enabled.
+
+FILTER_SOURCE_PATTERNS =
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER         = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES         = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C, C++ and Fortran comments will always remain visible.
+
+STRIP_CODE_COMMENTS    = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION    = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code.  Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS              = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS       = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX     = YES
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX    = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
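+# For example (hypothetical prefix), IGNORE_PREFIX = Parmes would list a
+# class named ParmesTopology under the letter T instead of P.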
+
+IGNORE_PREFIX          =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML          = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT            = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION    = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header. Note that when using a custom header you are responsible
+# for the proper inclusion of any scripts and style sheets that doxygen
+# needs, which is dependent on the configuration options used.
+# It is advised to generate a default header using "doxygen -w html
+# header.html footer.html stylesheet.css YourConfigFile" and then modify
+# that header. Note that the header is subject to change so you typically
+# have to redo this when upgrading to a newer version of doxygen or when
+# changing the value of configuration settings such as GENERATE_TREEVIEW!
+
+HTML_HEADER            =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER            =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If left blank doxygen will
+# generate a default style sheet. Note that it is recommended to use
+# HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this
+# tag will in the future become obsolete.
+
+HTML_STYLESHEET        =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional
+# user-defined cascading style sheet that is included after the standard
+# style sheets created by doxygen. Using this option one can overrule
+# certain style aspects. This is preferred over using HTML_STYLESHEET
+# since it does not replace the standard style sheet and is therefore more
+# robust against future updates. Doxygen will copy the style sheet file to
+# the output directory.
+
+HTML_EXTRA_STYLESHEET  =
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that
+# the files will be copied as-is; there are no commands or markers available.
+
+HTML_EXTRA_FILES       =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
+# Doxygen will adjust the colors in the style sheet and background images
+# according to this color. Hue is specified as an angle on a colorwheel,
+# see http://en.wikipedia.org/wiki/Hue for more information.
+# For instance the value 0 represents red, 60 is yellow, 120 is green,
+# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
+# The allowed range is 0 to 359.
+
+HTML_COLORSTYLE_HUE    = 115
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
+# the colors in the HTML output. For a value of 0 the output will use
+# grayscales only. A value of 255 will produce the most vivid colors.
+
+HTML_COLORSTYLE_SAT    = 115
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
+# the luminance component of the colors in the HTML output. Values below
+# 100 gradually make the output lighter, whereas values above 100 make
+# the output darker. The value divided by 100 is the actual gamma applied,
+# so 80 represents a gamma of 0.8; the value 220 represents a gamma of 2.2,
+# and 100 does not change the gamma.
+
+HTML_COLORSTYLE_GAMMA  = 124
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting
+# this to NO can help when comparing the output of multiple runs.
+
+HTML_TIMESTAMP         = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+
+HTML_DYNAMIC_SECTIONS  = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of
+# entries shown in the various tree structured indices initially; the user
+# can expand and collapse entries dynamically later on. Doxygen will expand
+# the tree to such a level that at most the specified number of entries are
+# visible (unless a fully collapsed tree already exceeds this amount).
+# So setting the number of entries to 1 will produce a fully collapsed tree
+# by default. 0 is a special value representing an infinite number of entries
+# and will result in a fully expanded tree by default.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files
+# will be generated that can be used as input for Apple's Xcode 3
+# integrated development environment, introduced with OSX 10.5 (Leopard).
+# To create a documentation set, doxygen will generate a Makefile in the
+# HTML output directory. Running make will produce the docset in that
+# directory and running "make install" will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
+# it at startup.
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+
+GENERATE_DOCSET        = NO
+
+# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
+# feed. A documentation feed provides an umbrella under which multiple
+# documentation sets from a single provider (such as a company or product suite)
+# can be grouped.
+
+DOCSET_FEEDNAME        = "Doxygen generated docs"
+
+# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
+# should uniquely identify the documentation set bundle. This should be a
+# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
+# will append .docset to the name.
+
+DOCSET_BUNDLE_ID       = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely
+# identify the documentation publisher. This should be a reverse domain-name
+# style string, e.g. com.mycompany.MyDocSet.documentation.
+
+DOCSET_PUBLISHER_ID    = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+
+DOCSET_PUBLISHER_NAME  = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP      = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE               =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION           =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls if a separate .chi index file is generated (YES) or that
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI           = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
+# is used to encode HtmlHelp index (hhk), content (hhc) and project file
+# content.
+
+CHM_INDEX_ENCODING     =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC             = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND             = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
+# that can be used as input for Qt's qhelpgenerator to generate a
+# Qt Compressed Help (.qch) of the generated HTML documentation.
+
+GENERATE_QHP           = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
+# be used to specify the file name of the resulting .qch file.
+# The path specified is relative to the HTML output folder.
+
+QCH_FILE               =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#namespace
+
+QHP_NAMESPACE          = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+
+QHP_VIRTUAL_FOLDER     = doc
+
+# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
+# add. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#custom-filters
+
+QHP_CUST_FILTER_NAME   =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see
+# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
+# Qt Help Project / Custom Filters</a>.
+
+QHP_CUST_FILTER_ATTRS  =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. See
+# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
+# Qt Help Project / Filter Attributes</a>.
+
+QHP_SECT_FILTER_ATTRS  =
+
+# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
+# be used to specify the location of Qt's qhelpgenerator.
+# If non-empty doxygen will try to run qhelpgenerator on the generated
+# .qhp file.
+
+QHG_LOCATION           =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
+# will be generated, which together with the HTML files, form an Eclipse help
+# plugin. To install this plugin and make it available under the help contents
+# menu in Eclipse, the contents of the directory containing the HTML and XML
+# files needs to be copied into the plugins directory of eclipse. The name of
+# the directory within the plugins directory should be the same as
+# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
+# the help appears.
+
+GENERATE_ECLIPSEHELP   = NO
+
+# A unique identifier for the eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have
+# this name.
+
+ECLIPSE_DOC_ID         = org.doxygen.Project
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
+# at top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it. Since the tabs have the same information as the
+# navigation tree you can set this option to YES if you already set
+# GENERATE_TREEVIEW to YES.
+
+DISABLE_INDEX          = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information.
+# If the tag value is set to YES, a side panel will be generated
+# containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
+# Windows users are probably better off using the HTML help feature.
+# Since the tree basically has the same information as the tab index you
+# could consider setting DISABLE_INDEX to YES when enabling this option.
+
+GENERATE_TREEVIEW      = NO
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
+# (range [0,1..20]) that doxygen will group on one line in the generated HTML
+# documentation. Note that a value of 0 will completely suppress the enum
+# values from appearing in the overview section.
+
+ENUM_VALUES_PER_LINE   = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH         = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
+# links to external symbols imported via tag files in a separate window.
+
+EXT_LINKS_IN_WINDOW    = NO
+
+# Use this tag to change the font size of LaTeX formulas included
+# as images in the HTML documentation. The default is 10. Note that
+# when you change the font size after a successful doxygen run you need
+# to manually remove any form_*.png images from the HTML output directory
+# to force them to be regenerated.
+
+FORMULA_FONTSIZE       = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are
+# not supported properly for IE 6.0, but are supported on all modern browsers.
+# Note that when changing this option you need to delete any form_*.png files
+# in the HTML output before the changes take effect.
+
+FORMULA_TRANSPARENT    = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
+# (see http://www.mathjax.org) which uses client side Javascript for the
+# rendering instead of using prerendered bitmaps. Use this if you do not
+# have LaTeX installed or if you want the formulas to look prettier in the HTML
+# output. When enabled you may also need to install MathJax separately and
+# configure the path to it using the MATHJAX_RELPATH option.
+
+USE_MATHJAX            = YES
+
+# When MathJax is enabled you need to specify the location relative to the
+# HTML output directory using the MATHJAX_RELPATH option. The destination
+# directory should contain the MathJax.js script. For instance, if the mathjax
+# directory is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to
+# the MathJax Content Delivery Network so you can quickly see the result without
+# installing MathJax.  However, it is strongly recommended to install a local
+# copy of MathJax from http://www.mathjax.org before deployment.
+
+MATHJAX_RELPATH        = http://www.mathjax.org/mathjax
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax extension
+# names that should be enabled during MathJax rendering.
+
+MATHJAX_EXTENSIONS     =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box
+# for the HTML output. The underlying search engine uses javascript
+# and DHTML and should work on any modern browser. Note that when using
+# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
+# (GENERATE_DOCSET) there is already a search function so this one should
+# typically be disabled. For large projects the javascript based search engine
+# can be slow; enabling SERVER_BASED_SEARCH may then provide a better solution.
+
+SEARCHENGINE           = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a PHP enabled web server instead of at the web client
+# using Javascript. Doxygen will generate the search PHP script and index
+# file to put on the web server. The advantage of the server
+# based approach is that it scales better to large projects and allows
+# full text search. The disadvantages are that it is more difficult to set up
+# and does not have live searching capabilities.
+
+SERVER_BASED_SEARCH    = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX         = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT           = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+# Note that when enabling USE_PDFLATEX this option is only used for
+# generating bitmaps for formulas in the HTML output, but not in the
+# Makefile that is written to the output directory.
+
+LATEX_CMD_NAME         = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME     = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX          = YES
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE             = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES         =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER           =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
+# the generated latex document. The footer should contain everything after
+# the last chapter. If it is left blank doxygen will generate a
+# standard footer. Notice: only use this tag if you know what you are doing!
+
+LATEX_FOOTER           =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS         = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX           = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE        = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES     = NO
+
+# If LATEX_SOURCE_CODE is set to YES then doxygen will include
+# source code with syntax highlighting in the LaTeX output.
+# Note that which sources are shown also depends on other settings
+# such as SOURCE_BROWSER.
+
+LATEX_SOURCE_CODE      = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
+# http://en.wikipedia.org/wiki/BibTeX for more info.
+
+LATEX_BIB_STYLE        = plain
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF           = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT             = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF            = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS         = NO
+
+# Load style sheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements; missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE    =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE    =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages
+
+GENERATE_MAN           = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT             = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION          = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS              = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML           = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT             = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA             =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD                =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING     = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF   = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD       = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX          = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader.  This is useful
+# if you want to understand what is going on.  On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY         = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING   = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION        = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF     = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
+# pointed to by INCLUDE_PATH will be searched when a #include is found.
+
+SEARCH_INCLUDES        = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH           =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS  =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
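+# For instance (hypothetical macros): PREDEFINED = HAVE_MPI=1 REAL:=double
+# would define HAVE_MPI as 1 and keep REAL safe from #undef and re-expansion.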
+
+PREDEFINED             =
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition that
+# overrules the definition found in the source code.
+
+EXPAND_AS_DEFINED      =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all references to function-like macros
+# that are alone on a line, have an all uppercase name, and do not end with a
+# semicolon, because these will confuse the parser if not removed.
+
+SKIP_FUNCTION_MACROS   = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles. For each
+# tag file the location of the external documentation should be added. The
+# format of a tag file without this location is as follows:
+#   TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+#   TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths
+# or URLs. Note that each tag file must have a unique name (where the name does
+# NOT include the path). If a tag file is not located in the directory in which
+# doxygen is run, you must also specify the path to the tagfile here.
+
+TAGFILES               =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE       =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS           = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS        = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH              = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option also works with HAVE_DOT disabled, but it is recommended to
+# install and use dot, since it yields more powerful graphs.
+
+CLASS_DIAGRAMS         = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH            =
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS   = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default).
+
+HAVE_DOT               = YES
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
+# allowed to run in parallel. When set to 0 (the default) doxygen will
+# base this on the number of processors available in the system. You can set it
+# explicitly to a value larger than 0 to get control over the balance
+# between CPU load and processing speed.
+
+DOT_NUM_THREADS        = 0
+
+# By default doxygen will use the Helvetica font for all dot files that
+# doxygen generates. When you want a differently looking font you can specify
+# the font name using DOT_FONTNAME. You need to make sure dot is able to find
+# the font, which can be done by putting it in a standard location or by setting
+# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
+# directory containing the font.
+
+DOT_FONTNAME           = Helvetica
+
+# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
+# The default size is 10pt.
+
+DOT_FONTSIZE           = 10
+
+# By default doxygen will tell dot to use the Helvetica font.
+# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to
+# set the path where dot can find it.
+
+DOT_FONTPATH           =
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force the
+# CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH            = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH    = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct groups dependencies
+
+GROUP_GRAPHS           = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK               = YES
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside
+# the class node. If there are many fields or methods and many nodes the
+# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
+# threshold limits the number of items for each type to make the size more
+# manageable. Set this to 0 for no limit. Note that the threshold may be
+# exceeded by 50% before the limit is enforced.
+
+UML_LIMIT_NUM_FIELDS   = 0
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS     = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH          = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH      = YES
+
+# If the CALL_GRAPH and HAVE_DOT options are set to YES then
+# doxygen will generate a call dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable call graphs
+# for selected functions only using the \callgraph command.
+
+CALL_GRAPH             = YES
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
+# doxygen will generate a caller dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable caller
+# graphs for selected functions only using the \callergraph command.
+
+CALLER_GRAPH           = YES
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will generate a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY    = YES
+
+# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH        = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are svg, png, jpg, or gif.
+# If left blank png will be used. If you choose svg you need to set
+# HTML_FILE_EXTENSION to xhtml in order to make the SVG files
+# visible in IE 9+ (other browsers do not have this requirement).
+
+DOT_IMAGE_FORMAT       = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+# Note that this requires a modern browser other than Internet Explorer.
+# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you
+# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files
+# visible. Older versions of IE do not have SVG support.
+
+INTERACTIVE_SVG        = NO
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH               = @DOXYGEN_DOT_PATH@
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS           =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the
+# \mscfile command).
+
+MSCFILE_DIRS           =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
+# nodes that will be shown in the graph. If the number of nodes in a graph
+# becomes larger than this value, doxygen will truncate the graph, which is
+# visualized by representing a node as a red box. Note that if the number of
+# direct children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES, doxygen will not show the graph at all. Also note
+# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+
+DOT_GRAPH_MAX_NODES    = 4
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lay further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+
+MAX_DOT_GRAPH_DEPTH    = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not
+# seem to support this out of the box. Warning: Depending on the platform used,
+# enabling this option may lead to badly anti-aliased labels on the edges of
+# a graph (i.e. they become hard to read).
+
+DOT_TRANSPARENT        = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS      = NO
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND        = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP            = YES
diff --git a/HySoP/hysop/.pyflymakerc b/HySoP/hysop/.pyflymakerc
new file mode 100644
index 0000000000000000000000000000000000000000..8e18c0c509c10de17dc21463877f2b2e468d65f3
--- /dev/null
+++ b/HySoP/hysop/.pyflymakerc
@@ -0,0 +1,6 @@
+# E265 : pep8, block comment should start with '# ' (ignored to allow '##')
+# R0921 : pylint, abstract class not instantiated
+# R0913 : pylint, too many args in class method
+# R0902 : pylint, too many instance attributes
+IGNORE_CODES_PEP8 = ["E265"]
+IGNORE_CODES_PYLINT = ["R0913", "R0902", "R0921"]
diff --git a/HySoP/hysop/.pyflymakercc b/HySoP/hysop/.pyflymakercc
new file mode 100644
index 0000000000000000000000000000000000000000..e53a15c69251dff9ade2f58f5c149de02b3225f2
Binary files /dev/null and b/HySoP/hysop/.pyflymakercc differ
diff --git a/HySoP/hysop/TAGS b/HySoP/hysop/TAGS
new file mode 100644
index 0000000000000000000000000000000000000000..5e1cf1d1729dc656b810347821fb148c01600e76
--- /dev/null
+++ b/HySoP/hysop/TAGS
@@ -0,0 +1,1668 @@
+
+constants.py,82
+def debugdecorator(76,1877
+        def decorator(79,1955
+    def prof(107,2926
+
+domain/box.py,92
+class Box(11,184
+    def __init__(19,340
+    def __str__(56,1688
+    def __eq__(68,2054
+
+domain/domain.py,427
+class Domain(14,321
+    def __new__(20,451
+    def __init__(25,566
+    def isOnTask(59,2064
+    def tasks_on_proc(71,2506
+    def currentTask(78,2691
+    def create_topology(84,2838
+    def create_plane_topology_from_mesh(129,5007
+    def checkTopo(156,6247
+    def register(170,6699
+    def remove(191,7441
+    def reset(206,7974
+    def printTopologies(211,8092
+    def __eq__(220,8316
+    def __ne__(225,8404
+
+domain/obstacle/controlBox.py,317
+class ControlBox(12,288
+    def __init__(21,620
+    def createVolumeAndSides(59,2098
+    def discretize(106,4288
+    def sub(193,8443
+    def integrate_on_proc(212,9263
+    def integrate(226,9708
+    def integrateOnSurface(234,10014
+    def integrateOnSurf_proc(247,10535
+    def globalResolution(267,11250
+
+domain/obstacle/disk.py,174
+class Disk(10,163
+    def __init__(15,225
+        def dist(31,872
+class HalfDisk(37,1069
+    def __init__(41,1143
+        def dist(54,1611
+        def LeftBox(61,1839
+
+domain/obstacle/obstacle.py,101
+class Obstacle(9,132
+    def __init__(14,285
+    def discretize(50,1760
+    def _isempty(90,3584
+
+domain/obstacle/planes.py,552
+class HalfSpace(11,208
+    def __init__(19,471
+        def Outside(39,1373
+    def discretize(55,2039
+    def __str__(67,2469
+class Plane(73,2622
+    def discretize(78,2793
+class SubSpace(100,3627
+    def __init__(107,3865
+        def dist(119,4325
+    def discretize(134,4918
+class SubPlane(157,5855
+    def discretize(162,5969
+    def globalResolution(212,8356
+class PlaneBoundaries(249,9836
+    def __init__(261,10341
+    def discretize(287,11621
+def assertSubPlane(304,12295
+def assertsurface(312,12490
+def assertline(342,13741
+
+domain/obstacle/sphere.py,236
+class Sphere(10,180
+    def __init__(15,243
+        def dist(35,1037
+    def discretize(48,1485
+    def __str__(72,2525
+class HemiSphere(79,2726
+    def __init__(85,2925
+        def LeftBox(100,3540
+    def discretize(104,3642
+
+domain/tests/test_box.py,249
+def test_create_box1(11,269
+def test_create_box2(22,637
+def test_create_box3(35,1057
+def test_create_box4(47,1440
+def test_topo_standard(85,2391
+def test_topo_multi_tasks(99,2853
+def test_topo_plane(114,3337
+def test_topo_from_mesh(126,3707
+
+domain/tests/test_obstacle.py,480
+def testSphere(44,1323
+def testHemiSphere(58,1707
+def testDisk(73,2151
+def testHalfDisk(86,2488
+def testHalfSpace2D(99,2837
+def testHalfSpace3D(108,3104
+def testPlane2D(117,3384
+def testPlane3D(126,3649
+def testSubSpace2D(135,3927
+def testSubSpace3D(143,4164
+def testSubPlane2D(152,4458
+def testSubPlane3D(162,4767
+def testPlaneBC2D(172,5087
+def testPlaneBC3D(183,5449
+def testControlBox2D(194,5826
+def testControlBox3D(206,6153
+def testControlBoxSphere(232,6962
+
+f2py/fftw2py.f90,522
+  subroutine init_fftw_solver(24,627
+  subroutine clean_fftw_solver(68,2184
+  subroutine solve_poisson_2d(81,2518
+  subroutine solve_diffusion_2d(100,3131
+  subroutine solve_poisson_3d(116,3560
+  subroutine solve_poisson_2d_c(135,4456
+  subroutine solve_poisson_3d_c(147,4940
+  subroutine solve_curl_diffusion_3d(159,5566
+  subroutine solve_diffusion_3d(177,6367
+  subroutine projection_om_3d(194,6982
+  subroutine multires_om_3d(210,7584
+  subroutine pressure_3d(232,8399
+  subroutine solve_curl_3d(250,8898
+
+f2py/scales2py.f90,269
+  subroutine init_advection_solver(23,913
+  subroutine init_multiscale(69,2888
+  subroutine solve_advection(89,3599
+  subroutine solve_advection_vect(111,4511
+  subroutine solve_advection_inter_basic(134,5525
+  subroutine solve_advection_inter_basic_vec(154,6439
+
+fields/continuous.py,542
+class Field(11,191
+    def __new__(37,932
+    def __init__(41,1027
+    def discretize(87,2980
+    def setFormula(109,3705
+    def initialize(122,4206
+    def value(142,5112
+    def __str__(156,5593
+    def discretization(173,6167
+    def norm(185,6547
+    def normh(199,7034
+    def setExtraParameters(210,7401
+    def dump(218,7656
+    def hdf_dump(234,8388
+    def hdf_load(249,8989
+    def load(266,9612
+    def zero(281,10285
+    def finalize(293,10648
+    def integrate(296,10686
+    def integrateOnSurface(311,11443
+
+fields/discrete.py,527
+class DiscreteField(17,446
+    def myfunc(43,1463
+    def __new__(69,2252
+    def __init__(73,2347
+    def __getitem__(116,4060
+    def __setitem__(123,4257
+    def _empty_synchro_ghosts(129,4406
+    def _synchro_ghosts(132,4457
+    def initialize(138,4598
+    def norm(195,7480
+    def normh(212,8187
+    def dump(230,8874
+    def load(251,9590
+    def zero(265,10082
+    def integrate_on_proc(270,10229
+    def integrate(288,10967
+    def integrateOnSurface(305,11728
+    def integrateOnSurf_proc(324,12662
+
+fields/tests/test_field.py,158
+def test_continuous(12,294
+def test_analytical(24,558
+def test_analytical_reset(34,878
+def test_discretization(49,1476
+def test_integrate_onSurf(68,2242
+
+fields/tests/test_variable.py,97
+def test_constantVar(10,229
+def func(21,472
+def test_timeVar(26,555
+def test_timeVar2(40,985
+
+fields/variable_parameter.py,215
+def myfunc(34,734
+class VariableParameter(58,1135
+    def __init__(65,1312
+    def _constant(95,2480
+    def update(98,2553
+    def __getitem__(106,2811
+    def __setitem__(113,3017
+    def __str__(119,3146
+
+gpu/config_cayman.py,204
+def copy_space_index_2d(15,310
+def copy_space_index_3d(19,481
+def xy_space_index_2d(37,1311
+def xy_space_index_3d(41,1480
+def xz_space_index_3d(59,2382
+def computational_kernels_index_space(71,2951
+
+gpu/config_default.py,451
+def copy_space_index_2d(15,319
+def copy_space_index_3d(19,490
+def xy_space_index_2d(37,1337
+def xy_space_index_3d(41,1506
+def xz_space_index_3d(59,2432
+def computational_kernels_index_space(70,2989
+def advection_index_space_3d(87,3510
+def advection_index_space_2d_SP(90,3651
+def advection_index_space_2d_DP(93,3795
+def remeshing_index_space_3d(97,3940
+def remeshing_index_space_2d(100,4081
+def advection_and_remeshing_index_space(104,4223
+
+gpu/config_k20m.py,451
+def copy_space_index_2d(15,308
+def copy_space_index_3d(19,479
+def xy_space_index_2d(37,1328
+def xy_space_index_3d(41,1497
+def xz_space_index_3d(59,2424
+def computational_kernels_index_space(70,2981
+def advection_index_space_3d(87,3502
+def advection_index_space_2d_SP(90,3644
+def advection_index_space_2d_DP(93,3780
+def remeshing_index_space_3d(97,3917
+def remeshing_index_space_2d(100,4050
+def advection_and_remeshing_index_space(104,4184
+
+gpu/gpu_discrete.py,449
+class GPUDiscreteField(60,2120
+    def __init__(65,2271
+    def batch_setup(148,5975
+    def _allocate_noBatch(175,7365
+    def _allocate_withBatch(193,8273
+    def fromField(225,9878
+    def setInitializationKernel(249,10950
+    def dump(257,11173
+    def load(266,11417
+    def initialize(274,11653
+    def finalize(333,14508
+    def toDevice(347,15103
+    def toHost(451,19991
+    def wait(546,24420
+    def clean_events(557,24727
+
+gpu/gpu_kernel.py,327
+class KernelListLauncher:KernelListLauncher10,197
+    def __init__(17,334
+    def __call__(48,1427
+    def launch_sizes_in_args(63,1883
+    def function_name(83,2533
+class KernelLauncher(92,2852
+    def __init__(100,3047
+    def launch_sizes_in_args(114,3485
+    def __call__(127,3824
+    def function_name(139,4154
+
+gpu/gpu_particle_advection.py,849
+class GPUParticleAdvection(24,841
+    def __init__(33,1020
+    def setup(183,8251
+    def _collect_kernels_cl_src(363,17280
+    def _buffer_allocations(366,17333
+    def _compute_1c(369,17444
+    def _compute_2c(372,17547
+    def _compute_3c(375,17650
+    def _buffer_initialisations(378,17753
+    def _collect_kernels_cl_src_copy(402,18906
+    def _collect_kernels_cl_src_transpositions_xy(424,19752
+    def _collect_kernels_cl_src_transpositions_xz(493,22894
+    def _collect_usr_cl_src(569,26384
+    def _apply_noBatch(582,26915
+    def _apply_batch(587,27117
+    def apply(637,29615
+    def _init_copy(658,30490
+    def _init_copy_r(666,30899
+    def _init_transpose_xy(674,31305
+    def _init_transpose_xy_r(682,31730
+    def _init_transpose_xz(690,32159
+    def _init_transpose_xz_r(698,32584
+    def finalize(707,33024
+
+gpu/gpu_particle_advection_1k.py,324
+class GPUParticleAdvection1k(18,607
+    def __init__(25,805
+    def setup(69,2832
+    def globalMemoryUsagePreview(72,2895
+    def _buffer_allocations(77,3121
+    def _collect_kernels_cl_src(144,6360
+    def _compute_1c(178,7907
+    def _compute_2c(194,8615
+    def _compute_3c(212,9455
+    def finalize(233,10439
+
+gpu/gpu_particle_advection_2k.py,360
+class GPUParticleAdvection2k(18,607
+    def __init__(24,742
+    def setup(68,2781
+    def globalMemoryUsagePreview(71,2844
+    def _buffer_allocations(76,3076
+    def _collect_kernels_cl_src(159,7297
+    def _compute_advec(213,9507
+    def _compute_1c(227,10069
+    def _compute_2c(240,10663
+    def _compute_3c(256,11401
+    def finalize(275,12283
+
+gpu/kernel_benchmark.py,292
+class BenchmarkSuite:BenchmarkSuite11,165
+    def __init__(14,224
+    def complete_timings(80,3062
+    def launch(95,3642
+    def write_file(127,5022
+def find_min(220,9044
+class Benchmark:Benchmark239,9843
+    def __init__(242,9892
+    def test(316,13359
+    def launch(382,16420
+
+gpu/QtRendering.py,524
+class QtOpenGLRendering(20,579
+    def __init__(45,1617
+    def setup(68,2357
+    def apply(162,6840
+    def finalize(182,7564
+    def startMainLoop(199,8120
+    def setMainLoop(208,8386
+        def problem_step(214,8641
+class MainLoop(231,9353
+    def __init__(236,9463
+    def step(245,9671
+class TestWindow(250,9754
+    def __init__(258,9978
+class GLWidget(269,10353
+    def __init__(273,10439
+    def setup(280,10660
+    def initializeGL(287,10909
+    def paintGL(298,11370
+    def resizeGL(321,12291
+
+gpu/tests/test_advection_nullVelocity.py,1269
+def setup_2D(17,646
+def setup_3D(24,842
+def assertion_2D(31,1046
+def assertion_2D_withPython(55,1854
+def assertion_3D(84,2853
+def assertion_3D_withPython(110,3789
+def test_2D_m6_1k(140,4860
+def test_2D_m6_2k(170,6008
+def test_2D_m6_1k_sFH(190,6664
+def test_2D_m6_2k_sFH(210,7342
+def test_3D_m6_1k(230,8020
+def test_3D_m6_2k(250,8684
+def test_3D_m6_1k_sFH(270,9350
+def test_3D_m6_2k_sFH(290,10036
+def test_2D_m4_1k(311,10736
+def test_2D_m4_2k(340,11841
+def test_2D_m4_1k_sFH(370,12978
+def test_2D_m4_2k_sFH(399,14115
+def test_3D_m4_1k(427,15195
+def test_3D_m4_2k(455,16263
+def test_3D_m4_1k_sFH(483,17335
+def test_3D_m4_2k_sFH(510,18408
+def test_2D_m8_1k(539,19517
+def test_2D_m8_2k(567,20573
+def test_2D_m8_1k_sFH(595,21629
+def test_2D_m8_2k_sFH(623,22715
+def test_3D_m8_1k(651,23801
+def test_3D_m8_2k(679,24873
+def test_3D_m8_1k_sFH(707,25945
+def test_3D_m8_2k_sFH(735,27047
+def test_2D_l6_2k(763,28149
+def test_2D_l6_1k_sFH(782,28760
+def test_2D_l6_2k_sFH(801,29393
+def test_3D_l6_1k(820,30026
+def test_3D_l6_2k(839,30645
+def test_3D_l6_1k_sFH(858,31264
+def test_3D_l6_2k_sFH(877,31905
+def test_rectangular_domain2D(896,32546
+def test_rectangular_domain3D(947,34535
+def test_2D_vector(1000,36676
+def test_3D_vector(1057,39018
+
+gpu/tests/test_advection_randomVelocity.py,994
+def setup_2D(17,615
+def setup_3D(24,811
+def assertion_2D_withPython(31,1015
+def assertion_3D_withPython(61,2009
+def test_2D_m6_1k(95,3105
+def test_2D_m6_2k(123,4148
+def test_2D_m6_1k_sFH(151,5191
+def test_2D_m6_2k_sFH(179,6264
+def test_3D_m6_1k(207,7337
+def test_3D_m6_2k(235,8396
+def test_3D_m6_1k_sFH(263,9454
+def test_3D_m6_2k_sFH(291,10543
+def test_2D_m4_1k(320,11645
+def test_2D_m4_2k(348,12688
+def test_2D_m4_1k_sFH(376,13731
+def test_2D_m4_2k_sFH(404,14804
+def test_3D_m4_1k(432,15877
+def test_3D_m4_2k(460,16942
+def test_3D_m4_1k_sFH(488,18001
+def test_3D_m4_2k_sFH(516,19090
+def test_2D_m8_1k(545,20192
+def test_2D_m8_2k(573,21241
+def test_2D_m8_1k_sFH(601,22290
+def test_2D_m8_2k_sFH(629,23369
+def test_3D_m8_1k(657,24448
+def test_3D_m8_2k(685,25513
+def test_3D_m8_1k_sFH(713,26578
+def test_3D_m8_2k_sFH(741,27673
+def test_rectangular_domain2D(769,28768
+def test_rectangular_domain3D(821,30743
+def test_vector_2D(875,32899
+def test_vector_3D(932,35191
+
+gpu/tests/test_copy.py,111
+def test_copy2D(11,249
+def test_copy2D_rect(48,1510
+def test_copy3D(110,3718
+def test_copy3D_rect(150,5054
+
+gpu/tests/test_opencl_environment.py,164
+def test_queue_unique_creation(6,111
+def test_parse_src_expand_floatN(18,422
+def test_parse_src_expand(40,1122
+def test_parse_expand_remeshed_component(61,1717
+
+gpu/tests/test_transposition.py,334
+def test_transposition_xy2D(11,258
+def test_transposition_xy2D_rect(63,2160
+def test_transposition_xy3D(133,4978
+def test_transposition_xy3D_rect(184,6910
+def test_transposition_xz3D(252,9610
+def test_transposition_xz3D_rect(304,11592
+def test_transposition_xz3Dslice(371,14239
+def test_transposition_xz3Dslice_rect(423,16228
+
+gpu/tools.py,554
+class OpenCLEnvironment(17,369
+    def __init__(21,472
+    def modify(75,2738
+    def _get_platform(118,4911
+    def _get_device(138,5642
+    def _get_context(189,7715
+    def _get_queue(218,8776
+    def get_WorkItems(237,9346
+    def _get_precision_opts(283,11382
+    def build_src(323,13065
+    def parse_file(391,15696
+        def repl_instruction(471,18987
+        def repl_parameter(478,19275
+    def LocalMemAllocator(487,19566
+def get_opengl_shared_environment(521,20985
+def get_opencl_environment(546,21958
+def explore(574,22891
+
+gpu/visu/marchingcube.py,89
+class Marching_Cube(18,296
+    def __init__(23,420
+    def _create_cl_context_(34,747
+
+mpi/bridge.py,418
+class Bridge(13,302
+    def __init__(39,1296
+    def setup(250,9750
+    def __str__(255,9797
+    def createSendSubArray(268,10425
+    def createRecvSubArray(279,10838
+    def _createSubArray(290,11251
+    def uselessTransfer(305,11937
+class Bridge_intercomm(315,12217
+    def __init__(328,12629
+    def setup(441,17588
+class InterBridge(474,18686
+    def __init__(477,18715
+    def _sendSlices(557,21948
+
+mpi/mesh.py,278
+class SubMesh(12,211
+    def __new__(19,331
+    def __init__(23,426
+    def indices(106,4052
+    def __str__(121,4571
+    def toIndexLocal(135,5151
+    def toIndexLocalFull(150,5778
+    def toIndexLocal2(166,6478
+    def toIndexGlobal(183,7188
+    def __eq__(197,7779
+
+mpi/newBridge.py,117
+class InterBridge(39,1343
+    def __init__(41,1371
+    def _sendSlices(158,5888
+    def _createSubArray(190,7206
+
+mpi/tests/test_bridge.py,49
+def test_bridge2D(3,2
+def test_bridge3D(19,455
+
+mpi/tests/test_mesh.py,55
+def test_mesh3D(11,207
+def test_mesh3D_ghost(33,1155
+
+mpi/tests/test_topology.py,351
+def check3D(32,939
+def test_create_default_topology(40,1149
+def test_create_default_topology2(48,1296
+def test_create_default_topology3(64,1768
+def test_create_topologyFromDim(81,2257
+def test_create_topologyFromShape(92,2500
+def test_create_topologyFromCutdir(112,3106
+def test_create_planetopology(128,3603
+def test_operator_equal(148,4400
+
+mpi/topology.py,560
+class Cartesian(17,390
+    def __new__(41,1122
+    def __init__(49,1349
+    def _build_mpi_topo(172,6720
+    def optimizeshape(259,10373
+    def parent(268,10608
+    def ghosts(274,10747
+    def task_id(280,10872
+    def plane_precomputed(287,11026
+    def _computeMesh(322,12438
+    def __eq__(357,14068
+    def __ne__(371,14483
+    def __str__(383,14832
+    def hasGhosts(398,15463
+    def get_id(404,15625
+    def isConsistentWith(411,15800
+    def reset_counter(415,15912
+class topotools(419,15974
+    def collectGlobalIndices(422,16018
+
+numerics/differential_operations.py,1017
+class DifferentialOperation(14,395
+    def __init__(23,596
+    def getWorkLengths(32,860
+class Curl(47,1436
+    def __init__(51,1549
+    def getWorkLengths(92,3130
+    def __call__(95,3225
+    def FDCentral(98,3312
+class DivV(126,4419
+    def __init__(149,5320
+    def getWorkLengths(176,6285
+    def __call__(188,6656
+    def FDCentral4(191,6747
+    def FDCentral4_CAA(221,8034
+class DivWV(253,9317
+    def __init__(272,9995
+    def getWorkLengths(286,10510
+    def __call__(289,10661
+    def FDCentral4(292,10752
+class Laplacian(304,11096
+    def __init__(308,11208
+    def getWorkLengths(322,11711
+    def __call__(325,11806
+class GradS(332,12018
+    def __init__(336,12130
+    def __call__(358,12944
+    def FDCentral(361,13023
+class GradV(376,13464
+    def __init__(381,13580
+    def __call__(389,13886
+    def FDCentral(392,13965
+class GradVxW(403,14257
+    def __init__(408,14397
+    def getWorkLengths(428,15115
+    def __call__(441,15582
+    def FDCentral4_diag(444,15699
+
+numerics/finite_differences.py,602
+class FiniteDifference(11,213
+    def __new__(60,1539
+    def __init__(64,1643
+    def computeIndices(78,2107
+    def compute(86,2373
+    def compute_and_add(98,2759
+class FD_C_2(112,3175
+    def __init__(118,3273
+    def computeIndices(127,3513
+    def compute(142,4159
+    def compute_and_add(152,4510
+class FD2_C_2(160,4832
+    def __init__(165,4933
+    def computeIndices(174,5181
+    def compute(189,5827
+    def compute_and_add(200,6262
+class FD_C_4(210,6663
+    def __init__(215,6760
+    def computeIndices(229,7116
+    def compute(254,8298
+    def compute_and_add(265,8754
+
+numerics/integrators/euler.py,99
+class Euler(11,152
+    def __init__(20,343
+    def getWorkLengths(31,823
+    def _core(41,1214
+
+numerics/integrators/odesolver.py,99
+class ODESolver(14,293
+    def __init__(24,490
+    def __call__(59,2158
+    def _basic(70,2534
+
+numerics/integrators/runge_kutta2.py,97
+class RK2(10,135
+    def __init__(18,325
+    def getWorkLengths(29,802
+    def _core(39,1197
+
+numerics/integrators/runge_kutta3.py,97
+class RK3(10,135
+    def __init__(14,250
+    def getWorkLengths(26,743
+    def _core(36,1138
+
+numerics/integrators/runge_kutta4.py,97
+class RK4(11,159
+    def __init__(15,274
+    def getWorkLengths(27,774
+    def _core(37,1169
+
+numerics/interpolation.py,311
+class Linear(8,126
+    def __init__(11,194
+    def getWorkLengths(54,1865
+    def _affect_work_1D(57,1953
+    def _affect_work_2D_X(60,2041
+    def _affect_work_2D_Y(65,2236
+    def _affect_work_3D_X(70,2431
+    def _affect_work_3D_Y(77,2755
+    def _affect_work_3D_Z(84,3079
+    def __call__(91,3403
+
+numerics/method.py,104
+class NumMethod(10,144
+    def __new__(16,264
+    def getWorkLengths(20,366
+    def __call__(33,838
+
+numerics/remeshing.py,990
+class Remeshing(8,122
+    def __init__(11,170
+    def slice_i_along_d(69,2822
+    def getWorkLengths(75,2953
+    def _affect_work_1D(78,3041
+    def _affect_work_2D_X(81,3143
+    def _affect_work_2D_Y(86,3352
+    def _affect_work_3D_X(91,3561
+    def _affect_work_3D_Y(98,3899
+    def _affect_work_3D_Z(105,4237
+    def __call__(112,4575
+class L2_1(151,6001
+    def __init__(153,6061
+class L2_2(164,6492
+    def __init__(166,6552
+class L2_3(177,7076
+    def __init__(179,7136
+class L2_4(190,7732
+    def __init__(192,7792
+class L4_2(203,8466
+    def __init__(205,8526
+class L4_3(218,9231
+    def __init__(220,9291
+class L4_4(233,10152
+    def __init__(235,10212
+class M8Prime(248,11233
+    def __init__(250,11299
+class L6_3(265,12471
+    def __init__(267,12531
+class L6_4(282,13704
+    def __init__(284,13764
+class L6_5(299,15164
+    def __init__(301,15224
+class L6_6(316,16857
+    def __init__(318,16917
+class L8_4(333,18801
+    def __init__(335,18861
+
+numerics/tests/test_diffOp.py,216
+def computeVel(15,341
+def computeVort(23,543
+def analyticalDivWV(30,732
+def analyticalGradVxW(40,1141
+def analyticalDivStressTensor(47,1325
+def testCurl(73,2093
+def testDivWV(85,2453
+def testGradVxW(107,3151
+
+numerics/tests/test_integrators.py,256
+def func1D(32,688
+def func2D(37,763
+def func3D(44,862
+def analyticalSolution(52,1015
+def f(59,1200
+def integrate(67,1355
+def run_integ(92,2028
+def test_Euler_1D(100,2270
+def test_RK2_1D(104,2317
+def test_RK3_1D(108,2360
+def test_RK4_1D(112,2403
+
+numerics/update_ghosts.py,129
+class UpdateGhosts(12,236
+    def __init__(18,352
+    def __call__(102,4497
+    def applyBC(105,4570
+    def apply(126,5371
+
+operator/adapt_timestep.py,104
+class AdaptTimeStep(18,543
+    def __init__(26,749
+    def discretize(67,2497
+    def setup(76,2822
+
+operator/advection.py,559
+class Advection(19,609
+    def __init__(52,1476
+    def scales_parameters(123,4140
+    def discretize(146,4916
+    def _scales_discretize(161,5434
+    def _create_scales_topo(256,9511
+    def _check_scales_topo(274,10418
+    def _no_scales_discretize(296,11370
+    def get_work_properties(305,11665
+    def setup(336,12915
+    def _setup_scales(345,13281
+    def _setup_python(359,13885
+    def _configure_splitting(443,17698
+    def _apply_no_comm(471,18953
+    def _apply_comm(488,19603
+    def finalize(509,20487
+    def __str__(520,20765
+
+operator/advectionDir.py,193
+class AdvectionDir(17,508
+    def __init__(32,834
+    def discretize(67,2024
+    def setup(115,3867
+    def get_work_properties(151,5489
+    def apply(173,6347
+    def finalize(180,6575
+
+operator/advold.py,349
+class Advection(18,568
+    def __init__(51,1435
+    def discretize(134,4689
+    def getWorkLengths(290,12321
+    def setWorks(313,13429
+    def setup(322,13703
+    def setup_Scales(335,14219
+    def setup_Python(351,14828
+    def _apply_noComm(457,19681
+    def _apply_Comm(474,20330
+    def finalize(495,21214
+    def __str__(506,21491
+
+operator/analytic.py,147
+class Analytic(10,165
+    def __init__(16,287
+    def discretize(46,1585
+    def setup(49,1665
+    def apply(53,1755
+    def __str__(63,2147
+
+operator/baroclinic.py,101
+class Baroclinic(15,395
+    def __init__(21,493
+    def discretize(52,1821
+    def setup(62,2134
+
+operator/computational.py,491
+class Computational(13,306
+    def __new__(30,862
+    def __init__(35,977
+    def get_work_properties(89,3259
+    def discretize(100,3684
+    def _discretize_vars(109,4015
+    def _check_variables(121,4442
+    def _standard_discretize(182,7163
+    def _build_topo(211,8362
+    def _fftw_discretize(218,8658
+    def setup(275,11154
+    def finalize(284,11386
+    def apply(293,11615
+    def printComputeTime(305,12057
+    def update_ghosts(317,12574
+    def __str__(324,12754
+
+operator/continuous.py,375
+class Operator(13,296
+    def __new__(30,942
+    def __init__(35,1057
+    def addRedistributeRequirement(101,3772
+    def getRedistributeRequirement(104,3858
+    def setup(108,3954
+    def finalize(117,4195
+    def apply(124,4300
+    def printComputeTime(133,4617
+    def is_up(138,4699
+    def _set_io(145,4871
+class Tools(164,5611
+    def checkDevice(170,5720
+
+operator/curlAndDiffusion.py,76
+class CurlDiffusion(17,340
+    def __init__(29,564
+    def setup(45,1192
+
+operator/density.py,101
+class DensityVisco(11,207
+    def __init__(17,332
+    def discretize(31,854
+    def setup(35,949
+
+operator/differential.py,228
+class Differential(16,473
+    def __init__(24,652
+    def discretize(42,1351
+    def setup(64,2159
+class Curl(73,2381
+    def get_work_properties(77,2466
+    def setup(94,3139
+class Grad(114,4046
+    def setup(120,4143
+
+operator/diffusion.py,100
+class Diffusion(15,340
+    def __init__(27,565
+    def discretize(53,1536
+    def setup(60,1761
+
+operator/discrete/adapt_timestep.py,270
+class AdaptTimeStep_D(23,791
+    def __init__(31,1002
+    def _gradU(147,5827
+    def _compute_gradU(153,6037
+    def _compute_stretch(162,6402
+    def _compute_cfl(175,6933
+    def _compute_vort(179,7099
+    def _compute_deform(184,7301
+    def apply(207,8198
+
+operator/discrete/analytic.py,73
+class Analytic_D(11,223
+    def __init__(17,354
+    def apply(35,1020
+
+operator/discrete/baroclinic.py,73
+class Baroclinic(15,479
+    def __init__(20,580
+    def apply(63,2449
+
+operator/discrete/curlAndDiffusion_fft.py,101
+class DiffusionFFT(15,357
+    def __init__(22,518
+    def apply(42,1203
+    def __str__(119,4998
+
+operator/discrete/density.py,76
+class DensityVisco_d(11,226
+    def __init__(16,294
+    def apply(35,930
+
+operator/discrete/differential.py,327
+class Differential(21,629
+    def __init__(29,811
+    def apply(51,1670
+class CurlFFT(57,1761
+    def apply(62,1868
+    def finalize(71,2363
+class CurlFD(78,2514
+    def __init__(83,2627
+    def _set_work_arrays(94,3082
+    def apply(104,3461
+class GradFD(109,3621
+    def __init__(114,3734
+    def apply(124,4189
+
+operator/discrete/diffusion_fft.py,101
+class DiffusionFFT(15,337
+    def __init__(22,498
+    def apply(43,1319
+    def finalize(70,2432
+
+operator/discrete/discrete.py,288
+class DiscreteOperator(11,270
+    def __new__(22,478
+    def __init__(27,593
+    def get_work_properties(80,2557
+    def _set_work_arrays(91,2982
+    def setWriter(102,3423
+    def apply(110,3590
+    def finalize(119,3898
+    def __str__(125,3991
+    def update_ghosts(139,4485
+
+operator/discrete/energy_enstrophy.py,149
+class EnergyEnstrophy(12,279
+    def __init__(16,400
+    def _set_work_arrays(51,1791
+    def get_work_properties(76,2807
+    def apply(90,3253
+
+operator/discrete/particle_advection.py,142
+class ParticleAdvection(16,451
+    def __init__(22,600
+    def _set_work_arrays(98,3763
+    def apply(122,4572
+    def finalize(170,6567
+
+operator/discrete/penalization.py,75
+class Penalization(13,327
+    def __init__(20,481
+    def apply(58,1894
+
+operator/discrete/poisson_fft.py,326
+class PoissonFFT(17,386
+    def __init__(24,542
+    def _solve2D(90,3167
+    def _project(99,3497
+    def _solve3D_multires(110,3937
+    def _solve3D_proj_multires(132,5057
+    def _solve3D_proj(141,5314
+    def _solve3D(150,5536
+    def _solve_and_correct(165,6236
+    def apply(171,6379
+    def finalize(178,6631
+
+operator/discrete/scales_advection.py,104
+class ScalesAdvection(18,393
+    def __init__(24,510
+    def apply(69,2329
+    def finalize(87,2959
+
+operator/discrete/stretching.py,380
+class Stretching(24,810
+    def __init__(35,1120
+    def _set_work_arrays(80,3051
+    def update_ghosts(104,4043
+    def apply(111,4220
+class Conservative(117,4311
+    def __init__(122,4481
+        def rhs(126,4627
+    def apply(138,5103
+class GradUW(164,6172
+    def __init__(170,6335
+        def rhs(177,6593
+    def apply(203,7575
+    def checkStability(233,8680
+
+operator/discrete/velocity_correction.py,119
+class VelocityCorrection_D(17,450
+    def __init__(26,724
+    def computeCorrection(81,3170
+    def apply(120,4904
+
+operator/energy_enstrophy.py,166
+class EnergyEnstrophy(10,224
+    def __init__(22,590
+    def get_work_properties(47,1595
+    def setup(63,2232
+    def energy(75,2754
+    def enstrophy(81,2894
+
+operator/hdf_io.py,465
+class HDF_IO(24,606
+    def __init__(32,762
+    def discretize(102,3391
+    def setup(135,4839
+    def open_hdf(154,5666
+    def apply(168,6148
+class HDF_Writer(174,6261
+    def __init__(178,6362
+    def apply(204,7152
+    def createXMFFile(222,7846
+    def _step_HDF5(249,8869
+    def _step_HDF5_XMF(278,10195
+class HDF_Reader(283,10292
+    def __init__(287,10397
+    def apply(306,11098
+    def dataset_names(326,11908
+    def finalize(335,12176
+
+operator/monitors/compute_forces.py,300
+class DragAndLift(14,400
+    def __init__(21,687
+    def _mpi_allsum(77,2947
+    def _mpi_sum(84,3191
+    def setup(93,3574
+    def apply(120,5013
+    def _integrateOnSurface(172,7127
+    def _integrateOnBox(234,9701
+    def _integrateOnBox2(253,10403
+    def _integrateOnBoxLoop(272,11103
+
+operator/monitors/energy_enstrophy.py,166
+class EnergyEnstrophy(10,224
+    def __init__(22,590
+    def get_work_properties(47,1595
+    def setup(63,2232
+    def energy(75,2754
+    def enstrophy(81,2894
+
+operator/monitors/monitoring.py,152
+class Monitoring(11,241
+    def __init__(17,378
+    def discretize(31,1016
+    def setup(37,1257
+    def finalize(44,1417
+    def __str__(48,1515
+
+operator/monitors/printer.py,244
+class Printer(23,527
+    def __init__(27,629
+    def setup(92,2985
+    def apply(121,4125
+    def createXMFFile(140,4847
+    def finalize(164,5999
+    def _step_HDF5(168,6087
+    def _step_HDF5_XMF(220,8509
+    def _step_DATA(224,8605
+
+operator/monitors/reader.py,156
+class Reader(18,337
+    def __init__(22,445
+    def apply(109,4248
+    def dataset_names(112,4307
+    def readHDF5(119,4480
+    def finalize(139,5488
+
+operator/monitors/reprojection_criterion.py,140
+class Reprojection_criterion(18,593
+    def __init__(24,803
+    def setup(86,3519
+    def apply(106,4219
+    def doProjection(156,6329
+
+operator/penalization.py,103
+class Penalization(12,267
+    def __init__(25,477
+    def discretize(44,1109
+    def setup(50,1329
+
+operator/poisson.py,134
+class Poisson(15,386
+    def __init__(28,603
+    def discretize(66,2173
+    def setup(80,2811
+    def activateProjection(93,3401
+
+operator/redistribute.py,474
+class Redistribute(30,1002
+    def __init__(41,1286
+    def _vars_setup_fromdict(129,4815
+    def _vars_setup_fromlist(153,5871
+    def _checkOperators(191,7639
+    def setup(209,8498
+    def _apply_toHost_host_toDevice(224,8973
+    def _apply_toHost_host(229,9078
+    def _apply_host_toDevice(234,9174
+    def _toHost(240,9273
+    def _toDevice(246,9409
+    def _host(252,9547
+    def _wait_host(258,9691
+    def _wait_device(264,9821
+    def test(270,10029
+
+operator/redistribute_inter.py,441
+class RedistributeInter(17,541
+    def __init__(24,814
+    def _vars_setup_fromdict(68,2720
+    def _vars_setup_fromlist(91,3794
+    def setup(127,5577
+    def apply(155,6562
+    def _apply_toHost_host_toDevice(166,6956
+    def _apply_toHost_host(178,7411
+    def _apply_host_toDevice(188,7727
+    def _apply_host(199,8118
+    def _host(205,8297
+    def _toHost(235,9594
+    def _toDevice(246,9963
+    def _wait_host(257,10338
+
+operator/redistribute_intra.py,429
+class RedistributeIntra(31,1098
+    def __init__(40,1366
+    def setup(62,2205
+    def _apply_toHost_host_toDevice(130,4979
+    def _apply_toHost_host(139,5266
+    def _apply_host_toDevice(146,5484
+    def _toHost(153,5706
+    def _toDevice(163,6026
+    def _host(173,6352
+    def _wait_host(214,8369
+    def _wait_all(229,8863
+    def addRedistributeRequirement(233,8943
+    def getRedistributeRequirement(237,9091
+
+operator/SAVE_REDIS/redistribute.py,485
+class Redistribute(29,991
+    def __init__(38,1250
+    def setup(91,3211
+    def _apply_toHost_host_toDevice(222,9275
+    def _apply_toHost_host(231,9560
+    def _apply_host_toDevice(238,9776
+    def _toHost(245,9996
+    def _toDevice(255,10330
+    def _host(265,10668
+    def _wait_host(306,12749
+    def _wait_device(321,13241
+    def _wait_all(327,13447
+    def test(331,13527
+    def addRedistributeRequirement(367,14886
+    def getRedistributeRequirement(371,15028
+
+operator/SAVE_REDIS/redistribute_intercomm.py,457
+class RedistributeIntercomm(17,562
+    def __init__(24,835
+    def discretize(84,3360
+    def setup(100,3916
+    def apply(198,8600
+    def _apply_toHost_host_toDevice(209,8994
+    def _apply_toHost_host(221,9449
+    def _apply_host_toDevice(231,9765
+    def _apply_host(242,10154
+    def _host(248,10333
+    def _toHost(275,11595
+    def _toDevice(286,11964
+    def _wait_device(297,12339
+    def _wait_host(303,12541
+    def test(315,12949
+
+operator/stretching.py,138
+class Stretching(18,500
+    def __init__(25,620
+    def get_work_properties(61,2009
+    def discretize(81,2806
+    def setup(91,3119
+
+operator/tests/test_advec_scales.py,451
+def test_nullVelocity_m4(16,498
+def test_nullVelocity_vec_m4(56,2201
+def test_nullVelocity_m6(107,4328
+def test_nullVelocity_vec_m6(132,5269
+def test_nullVelocity_m8(168,6666
+def test_nullVelocity_vec_m8(209,8304
+def _randomVelocity_m4(250,10054
+def _randomVelocity_vec_m4(299,12078
+def test_randomVelocity_m6(358,14619
+def test_randomVelocity_vec_m6(407,16647
+def test_randomVelocity_m8(466,19192
+def test_randomVelocity_vec_m8(515,21235
+
+operator/tests/test_analytic.py,749
+def func_scal_1(15,456
+def func_scal_2(20,555
+def func_vec_1(25,637
+def func_vec_2(32,763
+def func_vec_3(39,870
+def func_vec_4(46,1011
+def func_vec_5(53,1133
+def func_vec_6(61,1302
+def test_analytical_field_1(81,1765
+def test_analytical_field_2(103,2444
+def test_analytical_field_3(124,3111
+def test_analytical_field_4(150,3971
+def test_analytical_field_5(176,4875
+def test_analytical_field_6(204,5819
+def test_analytical_field_7(233,6834
+def test_analytical_field_8(263,7845
+def test_analytical_field_9(292,8862
+def test_analytical_field_10(323,9952
+def test_analytical_op_1(352,10959
+def test_analytical_op_3(380,11835
+def test_analytical_op_4(412,12919
+def test_analytical_op_5(447,14091
+def test_analytical_op_6(471,14879
+
+operator/tests/test_diff_poisson_3D.py,76
+def computeVel(9,222
+def computeVort(16,307
+def test_Diff_Poisson(39,836
+
+operator/tests/test_differential.py,329
+def callOp(27,697
+def velocity_f(72,2200
+def vorticity_f(79,2359
+def grad_velo(86,2548
+def test_CurlFD(99,2925
+def test_CurlFD2(107,3201
+def test_CurlFFT(115,3487
+def test_Grad(138,4318
+def test_Grad2(147,4620
+def test_CurlFD_2(155,4903
+def test_CurlFD2_2(163,5193
+def test_Grad_2(172,5504
+def test_Grad2_2(181,5820
+
+operator/tests/test_diffusion.py,141
+def computeVort(22,510
+def computeVort2D(29,762
+def test_Diffusion3D(36,941
+def test_Diffusion3D_2(54,1444
+def test_Diffusion2D(72,1936
+
+operator/tests/test_hdf5_io.py,298
+def init1(29,684
+def init2(38,939
+def func3D(47,1220
+def vec3D(52,1308
+def vort3D(59,1499
+def purgeFiles(66,1676
+def test_write_read_scalar_3D(71,1754
+def test_write_read_scalar_3D_defaults(117,3277
+def test_write_read_vectors_3D_defaults(167,4891
+def test_write_read_vectors_3D(229,6969
+
+operator/tests/test_particle_advection.py,463
+def setup_2D(14,410
+def setup_vector_2D(22,656
+def setup_list_2D(30,924
+def setup_3D(39,1233
+def setup_vector_3D(47,1494
+def setup_list_3D(55,1777
+def assertion(64,2101
+def assertion_vector2D(76,2514
+def assertion_vector3D(95,3297
+def assertion_list(116,4206
+def test_nullVelocity_2D(136,5012
+def test_nullVelocity_vector_2D(148,5282
+def test_nullVelocity_list_2D(160,5575
+def test_nullVelocity_3D(173,5917
+def test_nullVelocity_vector_3D(185,6195
+
+operator/tests/test_penalization.py,175
+def computeVel(16,519
+def computeScal(22,608
+def computeVel3D(27,677
+def init(34,792
+def testPenalScal2D(46,1103
+def testPenalVec2D(80,2285
+def testPenalVec3D(115,3492
+
+operator/tests/test_poisson.py,157
+def computeVort(27,767
+def computeRef(35,1056
+def computeVort2D(51,1646
+def computeRef2D(59,1838
+def test_Poisson3D(66,2028
+def test_Poisson2D(99,3119
+
+operator/tests/test_redistribute.py,121
+def func_vec_1(9,244
+def init_context(15,369
+def test_distribute_intra_1(57,1487
+def test_distribute_intra_2(70,1816
+
+operator/tests/test_Stretching.py,76
+def computeVel(13,404
+def computeVort(30,899
+def test_Stretching(53,1595
+
+operator/tests/test_velocity_correction.py,182
+def computeVel(29,621
+def computeVort(37,823
+def computeVel2D(45,1047
+def computeVort2D(52,1209
+def test_velocity_correction_3D(57,1295
+def test_velocity_correction_2D(99,2746
+
+operator/velocity_correction.py,167
+class VelocityCorrection(14,370
+    def __init__(23,639
+    def discretize(59,2396
+    def setup(63,2497
+    def apply(74,3043
+    def computeCorrection(80,3212
+
+problem/navier_stokes.py,49
+class NSProblem(13,373
+    def __init__(17,454
+
+problem/problem.py,308
+class Problem(14,344
+    def __new__(27,810
+    def __init__(31,905
+    def addMonitors(89,3231
+    def setup(104,3708
+    def pre_setup(128,4327
+    def solve(171,5852
+    def finalize(199,6722
+    def __str__(228,7611
+    def dump(237,7860
+    def restart(254,8580
+    def setDumpFreq(279,9524
+
+problem/problem_tasks.py,208
+class ProblemTasks(17,532
+    def __new__(26,880
+    def __init__(30,975
+    def pre_setup(60,2417
+    def addMonitors(120,5052
+    def setup(137,5669
+    def solve(167,6441
+    def finalize(192,7275
+
+problem/problem_with_GLRendering.py,101
+class ProblemGLRender(12,277
+    def __init__(19,460
+    def setup(40,1260
+    def solve(55,1662
+
+problem/simulation.py,219
+class Simulation(10,134
+    def __init__(15,214
+    def advance(69,2195
+    def updateTimeStep(102,3389
+    def printState(109,3585
+    def __str__(116,3809
+    def initialize(125,4185
+    def finalize(133,4411
+
+problem/tests/test_simulation.py,55
+def test_simu_incr(11,220
+def test_simu_incr2(33,766
+
+problem/tests/test_transport.py,395
+def cosinus_product_2D(14,362
+def cosinus_product_3D(18,439
+def gaussian_scalar_3D(22,538
+def rotating_velocity_3D(30,687
+def gaussian_scalar_2D(36,825
+def rotating_velocity_2D(44,963
+def assertion(50,1083
+def test_nullVelocity_2D(69,1922
+def test_nullVelocity_3D(83,2289
+def test_gaussian_2D(97,2678
+def test_cosinus_translation_2D(111,3048
+def test_cosinus_translation_3D(125,3400
+
+problem/transport.py,55
+class TransportProblem(9,169
+    def __init__(13,253
+
+test/test_obstacle/test_obstacle.py,16
+def run(12,174
+
+test/test_operator/test_CondStability.py,183
+class test_CondStability(18,525
+    def vitesse(23,622
+    def vorticite(29,728
+    def scalaire(35,836
+    def testCondStab(42,972
+    def runTest(136,4694
+def suite(139,4746
+
+test/test_operator/test_Curl.py,178
+class test_Curl(24,670
+    def setup(28,754
+    def testOperatorCurl(40,1183
+    def vitesse(74,3452
+    def vorticite(93,4085
+    def runTest(112,4931
+def suite(116,5008
+
+test/test_operator/test_DivProduct.py,220
+class test_DivProduct(24,670
+    def setup(28,760
+    def testOperatorDiv(40,1199
+    def vitesse(81,4124
+    def vorticite(87,4252
+    def analyticalDivProduct(93,4361
+    def runTest(99,4502
+def suite(103,4578
+
+test/test_operator/test_Forces.py,183
+class test_Forces(11,172
+    def vitesse(16,259
+    def vorticite(32,777
+    def scalaire(48,1451
+    def testComputeForces(55,1587
+    def runTest(111,3563
+def suite(114,3620
+
+test/test_operator/test_Grad.py,238
+class test_Grad(24,670
+    def setup(28,754
+    def testOperatorGrad(40,1187
+    def vitesse(99,5194
+    def gradientUx(118,5827
+    def gradientUy(124,5998
+    def gradientUz(130,6168
+    def runTest(136,6338
+def suite(140,6415
+
+test/test_operator/test_GradUomega.py,189
+class test_GradUomega(25,708
+    def setup(29,798
+    def testOperatorGradUomega(41,1231
+    def vitesse(86,4217
+    def stretch(105,4850
+    def runTest(111,5175
+def suite(115,5252
+
+test/test_operator/test_Penalization.py,58
+def vitesse(11,239
+def vorticite(17,320
+def run(24,404
+
+test/test_operator/test_Stretching.py,192
+class test_Stretching(14,377
+    def vitesse(19,468
+    def vorticite(35,986
+    def scalaire(51,1660
+    def testOperatorStretching(58,1796
+    def runTest(100,3038
+def suite(103,3100
+
+test/test_operator/test_transport_d.py,154
+class Transport_dTestCase(11,137
+    def setup_OpenCL_basic(13,184
+    def setup(86,5934
+    def tearDown(101,6551
+    def test_apply_basic(104,6589
+
+test/test_particular_solvers/test_EDO_erreur.py,156
+class test_EDO(17,480
+    def analyticalSolution(22,564
+    def f 28,771
+    def testIntegratorEDO(34,928
+    def runTest(162,5990
+def suite(165,6047
+
+test/test_particular_solvers/test_euler.py,209
+class test_Euler(16,455
+    def setup(23,608
+    def testEulerInt(36,1052
+    def vitesse(85,3781
+    def fctTest(91,3891
+    def analyticalSolution(97,4163
+    def runTest(103,4309
+def suite(108,4386
+
+test/test_particular_solvers/test_RK.py,231
+class test_RK(21,733
+    def vitesse(26,816
+    def vorticite(32,944
+    def analyticalSolution(38,1083
+    def f 44,1296
+    def wgradu 51,1442
+    def testIntegratorRK(58,1614
+    def runTest(145,5347
+def suite(148,5403
+
+tools/indices.py,54
+def condition2Slice(5,63
+def removeLastPoint(23,582
+
+tools/io_utils.py,389
+class io(15,353
+    def default_path(23,477
+    def check_dir(77,2539
+    def set_default_path(94,3148
+class Writer(107,3603
+    def __init__(124,4063
+    def do_write(187,6492
+    def _fullwrite(196,6782
+    def _partialwrite(201,6924
+    def finalize(204,6996
+    def __str__(209,7139
+class XMF(218,7429
+    def _list_format(223,7525
+    def write_grid_attributes(233,7864
+
+tools/numpywrappers.py,389
+def zeros(14,274
+def ones(24,571
+def zeros_like(34,865
+def copy(45,1217
+def asarray(53,1389
+def asrealarray(62,1598
+def asintarray(72,1855
+def int_zeros(79,2016
+def asdimarray(86,2156
+def asboolarray(96,2438
+def dim_ones(106,2718
+def dim_zeros(113,2853
+def equal(120,2990
+def abs(128,3250
+def real_sum(137,3469
+def prod(141,3532
+def add(145,3605
+def writeToFile(149,3684
+
+tools/parameters.py,203
+class MPI_params(12,228
+    def __new__(36,1037
+class Discretization(45,1302
+    def __new__(52,1615
+    def __eq__(64,2126
+    def __ne__(70,2339
+class IO_params(80,2542
+    def __new__(90,2953
+
+tools/problem2dot.py,40
+def get_shape(24,529
+def toDot(33,746
+
+tools/profiling.py,259
+def create_mem_stat(52,1809
+def get_mem_stats(66,2156
+def print_mem_stat_file(84,2856
+def get_stats(104,3698
+def f8(123,4318
+def print_line_file(127,4354
+def print_stats_file(153,5044
+def launch_meliae_profiling(204,6706
+def launch_cProfile(234,7995
+
+tools/remeshing_formula_parsing.py,102
+def parse(25,571
+def createFMA(114,3771
+    def fma_replace(149,4653
+    def fma_recurse(185,5816
+
+tools/sys_utils.py,48
+class SysUtils(7,61
+    def in_ipython(12,163
+
+tools/tests/test_formula_parsing.py,70
+def test_parsing_toPython(85,3871
+def test_parsing_toOpenCL(89,3957
+
+tools/tests/test_parameters.py,176
+def test_io_params_1(12,187
+def test_io_params_2(22,466
+def test_io_params_3(31,726
+def test_io_params_4(39,961
+def test_io_params_5(49,1256
+def test_io_params_6(56,1436
+
+tools/tests/test_timers.py,131
+class A_class(7,111
+    def __init__(8,134
+    def call(14,261
+    def call_other(18,322
+def test_timer_from_decorator(22,371
+
+tools/timers.py,553
+def timed_function(12,209
+    def wrapper(20,463
+class FunctionTimer(29,703
+    def __init__(37,959
+    def __call__(52,1355
+        def f(60,1632
+    def __str__(73,2028
+class ManualFunctionTimer(79,2170
+    def __init__(85,2398
+        def fun(86,2428
+    def append_time(91,2536
+class Timer(100,2764
+    def __init__(106,2911
+    def __add__(120,3338
+    def getFunctionTimer(131,3587
+    def addFunctionTimer(145,4024
+    def addSubTimer(153,4198
+    def compute_summary(159,4373
+    def rTimes(171,4809
+    def __str__(186,5433
+
+tools/tests/__init__.py,0
+
+tools/plotDrag.py,0
+
+tools/__init__.py,0
+
+test/test_tools/__init__.py,0
+
+test/test_particular_solvers/__init__.py,0
+
+test/test_operator/__init__.py,0
+
+test/test_obstacle/__init__.py,0
+
+test/main_unit_tests.py,0
+
+test/__init__.py,0
+
+problem/tests/test_problem.py,0
+
+problem/tests/__init__.py,0
+
+problem/__init__.py,0
+
+operator/tests/Testing/Temporary/CTestCostData.txt,0
+
+operator/tests/__init__.py,0
+
+operator/monitors/__init__.py,0
+
+operator/discrete/__init__.py,0
+
+operator/__init__.py,0
+
+numerics/tests/__init__.py,0
+
+numerics/integrators/__init__.py,0
+
+numerics/__init__.py,0
+
+mpi/tests/__init__.py,0
+
+mpi/main_var.py,0
+
+mpi/__init__.py,0
+
+methods_keys.py,0
+
+methods.py,0
+
+gpu/visu/__init__.py,0
+
+gpu/tests/__init__.py,0
+
+gpu/__init__.py,0
+
+fields/tests/__init__.py,0
+
+fields/__init__.py,0
+
+fakef2py/scales2py/__init__.py,0
+
+fakef2py/fftw2py/__init__.py,0
+
+fakef2py/__init__.py,0
+
+f2py/parameters.f90,0
+
+domain/tests/__init__.py,0
+
+domain/obstacle/__init__.py,0
+
+domain/__init__.py,0
+
+default_methods.py,0
+
+__init__.py,0
diff --git a/HySoP/hysop/__init__.py b/HySoP/hysop/__init__.py
new file mode 100755
index 0000000000000000000000000000000000000000..5ccaf26dd07b7a8716cc3e1e1d1b073c5923d5cb
--- /dev/null
+++ b/HySoP/hysop/__init__.py
@@ -0,0 +1,76 @@
+"""
+@package parmepy
+
+Python package dedicated to flow simulation using particle methods
+on hybrid architectures (MPI-GPU)
+
+"""
+__version__ = 1.00
+__all__ = ['Box', 'Field', 'VariableParameter', 'Simulation']
+
+# Compilation flags
+__MPI_ENABLED__ = "ON" == "ON"
+__GPU_ENABLED__ = "ON" == "ON"
+__FFTW_ENABLED__ = "ON" == "ON"
+__SCALES_ENABLED__ = "ON" == "ON"
+__VERBOSE__ = "OFF" in ["1", "3"]
+__DEBUG__ = "OFF" in ["2", "3"]
+__PROFILE__ = "OFF" in ["0", "1"]
+__OPTIMIZE__ = "OFF" == "ON"
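+# Note: the literal values above are produced at configure time from the
+# @...@ placeholders of __init__.py.in; e.g. with DEBUG=3 the template line
+# '__VERBOSE__ = "@DEBUG@" in ["1", "3"]' becomes
+# '__VERBOSE__ = "3" in ["1", "3"]', i.e. True.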
+
+import parmepy.tools.io_utils as io
+default_path = io.io.default_path()
+msg_start = '\nStarting Parmes (no mpi) version '
+msg_start += str(__version__)
+msg_io = '\nWarning: default path for all i/o is ' + default_path + '.\n'
+msg_io += 'If you want to change this, use the io.set_default_path function.\n'
+
+# MPI
+if __MPI_ENABLED__:
+    import parmepy.mpi as mpi
+    if mpi.main_rank == 0:
+        msg_start += ' on ' + str(mpi.main_size) + ' mpi process(es).'
+        print msg_start
+        print msg_io
+
+else:
+    print msg_start
+    print msg_io
+
+# OpenCL
+__DEFAULT_PLATFORM_ID__ = 0
+__DEFAULT_DEVICE_ID__ = 2
+
+
+version = "1.0.0"
+
+## Box-type physical domain
+import parmepy.domain.box
+Box = parmepy.domain.box.Box
+
+## Fields
+import parmepy.fields.continuous
+Field = parmepy.fields.continuous.Field
+
+## Variable parameters
+import parmepy.fields.variable_parameter
+VariableParameter = parmepy.fields.variable_parameter.VariableParameter
+
+## Simulation parameters
+import parmepy.problem.simulation
+Simulation = parmepy.problem.simulation.Simulation
+
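+# A minimal usage sketch of the names exported below (the Field and
+# Simulation arguments shown here are only indicative):
+#   import parmepy as pp
+#   b = pp.Box()              # default: 3D unit box
+#   f = pp.Field(domain=b)    # hypothetical argument name
+#   s = pp.Simulation()       # hypothetical default arguments
+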
+# ## ## Problem
+# import problem.problem
+# Problem = problem.problem.Problem
+
+
+# ## ## Solver
+# import particular_solvers.basic
+# ## #import particular_solvers.gpu
+# ParticleSolver = particular_solvers.basic.ParticleSolver
+# ## #GPUParticleSolver = particular_solvers.gpu.GPUParticleSolver
+
+
+## from tools.explore_hardware import explore
+
diff --git a/HySoP/hysop/__init__.py.in b/HySoP/hysop/__init__.py.in
index d3e6d58501b703ea9a9782ab1f3eef85abbe2193..2edad41f57cbe5d752c9ea6c922777a3ae795fe8 100755
--- a/HySoP/hysop/__init__.py.in
+++ b/HySoP/hysop/__init__.py.in
@@ -17,13 +17,25 @@ __VERBOSE__ = "@DEBUG@" in ["1", "3"]
 __DEBUG__ = "@DEBUG@" in ["2", "3"]
 __PROFILE__ = "@PROFILE@" in ["0", "1"]
 __OPTIMIZE__ = "@OPTIM@" is "ON"
+
+import parmepy.tools.io_utils as io
+default_path = io.io.default_path()
+msg_start = '\nStarting @PACKAGE_NAME@ (no mpi) version '
+msg_start += str(__version__)
+msg_io = '\nWarning: default path for all i/o is ' + default_path + '.\n'
+msg_io += 'If you want to change this, use the io.set_default_path function.\n'
+
 # MPI
 if __MPI_ENABLED__:
     import parmepy.mpi as mpi
-    if(mpi.main_rank == 0):
-        print ("Starting @PACKAGE_NAME@ version " + str(__version__) + ".\n")
+    if mpi.main_rank == 0:
+        msg_start += ' on ' + str(mpi.main_size) + ' mpi process(es).'
+        print msg_start
+        print msg_io
+
 else:
-    print ("Starting @PACKAGE_NAME@ (no mpi) version " + str(__version__) + ".\n")
+    print msg_start
+    print msg_io
 
 # OpenCL
 __DEFAULT_PLATFORM_ID__ = @OPENCL_DEFAULT_OPENCL_PLATFORM_ID@
diff --git a/HySoP/hysop/__init__.pyc b/HySoP/hysop/__init__.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1f8867bc895d3e228fac4c39c9ec841d37a204f9
Binary files /dev/null and b/HySoP/hysop/__init__.pyc differ
diff --git a/HySoP/hysop/constants.py b/HySoP/hysop/constants.py
index 36720598d237ebe774857b53d39df76d551faea4..d77a3e93b93887359d33dc1a8a6ca1a3b8298500 100644
--- a/HySoP/hysop/constants.py
+++ b/HySoP/hysop/constants.py
@@ -3,28 +3,25 @@
 Constant parameters required for the parmepy package (internal use).
 
 """
-from parmepy import __DEBUG__, __PROFILE__, __OPTIMIZE__
+from parmepy import __DEBUG__, __PROFILE__
 import numpy as np
 import math
 from parmepy.mpi import MPI
-# Utilities for serialization
-if __OPTIMIZE__:
-    import cPickle
-    parmesPickle = cPickle
-else:
-    from scitools.NumPyDB import NumPyDB_cPickle
-    parmesPickle = NumPyDB_cPickle
-
 
 PI = math.pi
 # Set default type for real and integer numbers
 PARMES_REAL = np.float64
+SIZEOF_PARMES_REAL = int(PARMES_REAL(1.).nbytes)
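+# (== 8 bytes when PARMES_REAL is np.float64)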
 # type for array indices
 PARMES_INDEX = np.uint32
 # type for integers
-PARMES_INTEGER = np.int64
+PARMES_INTEGER = np.int32
+# integer used for arrays dimensions
+PARMES_DIM = np.int16
 # float type for MPI messages
 PARMES_MPI_REAL = MPI.DOUBLE
+# int type for MPI messages
+PARMES_MPI_INTEGER = MPI.INT
 ## default array layout (fortran or C convention)
 ORDER = 'F'
 # to check array ordering with :
@@ -43,7 +40,7 @@ YDIR = 1
 ## label for z direction
 ZDIR = 2
 ## Tag for periodic boundary conditions
-PERIODIC = 0
+PERIODIC = 99
 ## Directions string
 S_DIR = ["_X", "_Y", "_Z"]
 ## Stretching formulation (div(w:u))
@@ -62,12 +59,12 @@ WITH_GUESS = 1
 NOALIAS = 2
 
 # File format types for output
-## VTK ouput for printers
-VTK = 0
 ## HDF5 output for printers
-HDF5 = 1
+HDF5 = 998
 ## ascii text output
-DATA = 2
+ASCII = 997
+## Default value for task id (mpi task)
+DEFAULT_TASK_ID = 999
 
 
 #define debug decorator:
@@ -105,4 +102,3 @@ else:
     def prof(f):
         # Nothing ...
         return f
-
diff --git a/HySoP/hysop/constants.pyc b/HySoP/hysop/constants.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d85c2f31197bc6dc62cdc99e89d221a935af14f1
Binary files /dev/null and b/HySoP/hysop/constants.pyc differ
diff --git a/HySoP/hysop/default_methods.py b/HySoP/hysop/default_methods.py
index d520830e91745ecf23a0cf3de9b7ae4b1d547a0e..dac3d5721fde789ede97c3c2f45c1a44d81e7ab1 100644
--- a/HySoP/hysop/default_methods.py
+++ b/HySoP/hysop/default_methods.py
@@ -4,7 +4,8 @@ Default parameter values for methods in operators.
 """
 from parmepy.methods_keys import TimeIntegrator, Interpolation, GhostUpdate,\
     Remesh, Support, Splitting, MultiScale, Formulation, SpaceDiscretisation, \
-    dtCrit
+    dtCrit, Precision
+from parmepy.constants import PARMES_REAL
 from parmepy.numerics.integrators.runge_kutta2 import RK2
 from parmepy.numerics.integrators.runge_kutta3 import RK3
 from parmepy.numerics.interpolation import Linear
@@ -13,9 +14,10 @@ from parmepy.numerics.remeshing import L2_1
 
 
 ADVECTION = {TimeIntegrator: RK2, Interpolation: Linear,
-             Remesh: L2_1, Support: '', Splitting: 'o2', MultiScale: L2_1}
+             Remesh: L2_1, Support: '', Splitting: 'o2', MultiScale: L2_1,
+             Precision: PARMES_REAL}
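+# A per-operator override is a plain dict update (hypothetical usage):
+#   m = dict(ADVECTION)
+#   m[TimeIntegrator] = RK3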
 
-from parmepy.numerics.finite_differences import FD_C_4
+from parmepy.numerics.finite_differences import FD_C_4, FD_C_2
 
 DIFFERENTIAL = {SpaceDiscretisation: FD_C_4, GhostUpdate: True}
 
@@ -30,3 +32,5 @@ POISSON = {SpaceDiscretisation: 'fftw', GhostUpdate: True}
 
 STRETCHING = {TimeIntegrator: RK3, Formulation: "Conservative",
               SpaceDiscretisation: FD_C_4}
+
+FORCES = {SpaceDiscretisation: FD_C_2}
diff --git a/HySoP/hysop/domain/__init__.pyc b/HySoP/hysop/domain/__init__.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb9d871e849079e5b45218f22902b5c41c4e0b43
Binary files /dev/null and b/HySoP/hysop/domain/__init__.pyc differ
diff --git a/HySoP/hysop/domain/__pycache__/box.cpython-27-PYTEST.pyc b/HySoP/hysop/domain/__pycache__/box.cpython-27-PYTEST.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0cc5ab1dc04e6bddfc70701bea5c2f47d04c39a5
Binary files /dev/null and b/HySoP/hysop/domain/__pycache__/box.cpython-27-PYTEST.pyc differ
diff --git a/HySoP/hysop/domain/box.py b/HySoP/hysop/domain/box.py
index 3cb0d050cbda2ad19fa9b0dd4570253e286341e2..fbea902f159455e834a8ceca98db08a330690e8f 100644
--- a/HySoP/hysop/domain/box.py
+++ b/HySoP/hysop/domain/box.py
@@ -4,8 +4,8 @@ Box-shaped domains definition.
 
 """
 from parmepy.domain.domain import Domain
-from parmepy.constants import np, PARMES_REAL, PARMES_INTEGER, PERIODIC, \
-    debug
+from parmepy.constants import PERIODIC, debug
+import parmepy.tools.numpywrappers as npw
 
 
 class Box(Domain):
@@ -16,40 +16,47 @@ class Box(Domain):
     """
 
     @debug
-    def __init__(self, dimension=3, length=None, origin=None):
+    def __init__(self, length=None, origin=None, **kwds):
         """
         Create a Periodic Box from a dimension, length and origin.
         Parameters dimensions must coincide. Raise ValueError
         in case of inconsistent parameters dimensions.
-
-        @param dimension : Box dimension. Default: 3
         @param length : Box length. Default [1.0, ...]
         @param origin : Box minimum position. Default [0., ...]
         \code
         >>> import parmepy as pp
         >>> import numpy as np
         >>> b = pp.Box()
-        >>> (b.max == np.asarray([1.0, 1.0, 1.0])).all()
+        >>> (b.end == np.asarray([1.0, 1.0, 1.0])).all()
         True
 
         \endcode
         """
-        ## Space dimension
-        self.dimension = dimension
-        Domain.__init__(self, dimension)
+        if 'dimension' not in kwds:
+            if length is not None or origin is not None:
+                dim = [len(list(j)) for j in [length, origin]
+                       if j is not None]
+                kwds['dimension'] = dim[0]
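+                # e.g. Box(length=[1., 1.]) yields a 2-dimensional box.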
+
+        super(Box, self).__init__(**kwds)
+
         ##  Box length.
         if length is None:
             length = [1.0] * self.dimension
         if origin is None:
             origin = [0.] * self.dimension
-        self.length = np.asarray(length, dtype=PARMES_REAL)
+        self.length = npw.const_realarray(length)
         ##  Box origin
-        self.origin = np.asarray(origin, dtype=PARMES_REAL)
+        self.origin = npw.const_realarray(origin)
+
         ## Maximum Box position. max = origin + length
-        self.max = self.origin + self.length
-        ## Boundary conditions type
-        self.boundaries = np.zeros((self.dimension), dtype=PARMES_INTEGER)
-        self.boundaries[:] = PERIODIC
+        self.end = self.origin + self.length
+        # set periodic boundary conditions
+        if self.boundaries is None:
+            self.boundaries = npw.asdimarray([PERIODIC] * self.dimension)
+        else:
+            msg = 'Boundary conditions must be periodic.'
+            assert list(self.boundaries).count(PERIODIC) == self.dimension, msg
 
     def __str__(self):
         """
@@ -60,5 +67,12 @@ class Box(Domain):
             "D box (parallelepipedic or rectangular) domain : \n"
 
         s += "   origin : " + str(self.origin) + ", maxPosition :" \
-             + str(self.max) + ", lengths :" + str(self.length) + "."
+             + str(self.end) + ", lengths :" + str(self.length) + "."
         return s
+
+    def __eq__(self, other):
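+        """Two boxes are equal if their lengths, origins, boundary
+        conditions and current tasks all match."""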
+        c1 = (self.length == other.length).all()
+        c2 = (self.origin == other.origin).all()
+        c3 = (self.boundaries == other.boundaries).all()
+        c4 = self.currentTask() == other.currentTask()
+        return c1 and c2 and c3 and c4
diff --git a/HySoP/hysop/domain/box.pyc b/HySoP/hysop/domain/box.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e7d2d134452cff756849cde8aba7c97ecffe7270
Binary files /dev/null and b/HySoP/hysop/domain/box.pyc differ
diff --git a/HySoP/hysop/domain/domain.py b/HySoP/hysop/domain/domain.py
index a9cbe835456083d18b3b2d0ff1dab9094721a7b0..4110129bfbade2b778ba2504e7039023ed69fc26 100644
--- a/HySoP/hysop/domain/domain.py
+++ b/HySoP/hysop/domain/domain.py
@@ -5,8 +5,11 @@ Abstract interface for physical domains description.
 
 """
 from abc import ABCMeta, abstractmethod
-from parmepy.constants import debug
+from parmepy.constants import debug, DEFAULT_TASK_ID, PERIODIC
 from parmepy.mpi.topology import Cartesian
+from parmepy.mpi import main_rank, main_size, main_comm
+from parmepy.tools.parameters import MPI_params
+import numpy as np
 
 
 class Domain(object):
@@ -20,9 +23,11 @@ class Domain(object):
 
     @debug
     @abstractmethod
-    def __init__(self, dimension):
+    def __init__(self, dimension=3, proc_tasks=None, bc=None):
         """ Constructor
         @param dimension integer : domain dimension.
+        @param proc_tasks : connectivity between tasks and mpi processes;
+        proc_tasks[n] = 12 means that task 12 owns proc n.
+        @param bc : boundary conditions type (at the moment only
+        periodic conditions are allowed).
         """
         ## Domain dimension.
         self.dimension = dimension
@@ -31,57 +36,145 @@ class Domain(object):
         ## the comparison operator in the class parmepy.mpi.topology.Cartesian.
         self.topologies = {}
 
-    def getOrCreateTopology(self, topoDim, gridResolution,
-                            topoResolution=None, ghosts=None,
-                            precomputed=False, localres=None,
-                            offset=None, fixedResolution=False,
-                            comm=None):
+        ## Connectivity between mpi tasks and proc numbers:
+        ## self._tasks_on_proc[i] returns the task bound to the proc
+        ## of rank i in main_comm.
+        # Warning (FP): ranks are taken in main_comm! We assume that
+        # each proc has one and only one task.
+        # Maybe we should change this and allow proc_tasks in a subcomm,
+        # but at this time it is not necessary.
+        if proc_tasks is None:
+            self._tasks_on_proc = [DEFAULT_TASK_ID, ] * main_size
+            comm_s = main_comm
+        else:
+            assert len(proc_tasks) == main_size
+            self._tasks_on_proc = proc_tasks
+            # Split main comm according to the defined tasks.
+            comm_s = main_comm.Split(color=proc_tasks[main_rank],
+                                     key=main_rank)
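+            # e.g. proc_tasks = [1, 1, 2, 2] yields two sub-communicators,
+            # one for procs {0, 1} (task 1), one for procs {2, 3} (task 2).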
+
+        ## The sub-communicator corresponding to the task that owns
+        ## the current process.
+        self.comm_task = comm_s
+        ## Boundary conditions type.
+        ## At the moment, only periodic conditions are allowed.
+        self.boundaries = bc
+
+    def isOnTask(self, params):
+        """
+        @param params : a parmepy.mpi.MPI_params object
+        or an int (task number)
+        @return True if params.task_id (or params itself, when an int)
+        matches the task_id of the current proc.
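+        For instance, dom.isOnTask(12) is True on every process p
+        such that proc_tasks[p] == 12.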
+        """
+        if params.__class__ is MPI_params:
+            return params.task_id == self._tasks_on_proc[main_rank]
+        else:
+            return params == self._tasks_on_proc[main_rank]
+
+    def tasks_on_proc(self, index):
+        """
+        @param index: proc number
+        @return task number of mpi proc 'index'
+        """
+        return self._tasks_on_proc[index]
+
+    def tasks_list(self):
+        """
+        @return the connectivity between proc. numbers and their associated
+        task (numbers in main_comm).
+        """
+        return self._tasks_on_proc
+
+    def currentTask(self):
+        """
+        @return task number of the current proc.
         """
-        This routines checks if a topology is present in the list
-        of topologies of the domain.
+        return self._tasks_on_proc[main_rank]
+
+    def create_topology(self, discretization, dim=None, mpi_params=None,
+                        shape=None, cutdir=None):
+        """
+        Create or return an existing parmepy.mpi.topology.
+
         Either it gets the topology corresponding to the input arguments
-        (topodim, gridResolution, ...) if it exists (in the sense of the
-        comparison operator defined in parmepy.mpi.topology.Cartesian)
-        or it creates a new topology and register it in the topology list.
-        @param topoDim : dimension of the underlying MPI topology (i.e.
-        process layout)
-        @param gridResolution : resolution of the global mesh
-        @param topoResolution : resolution of the mpi process distribution
+        if it exists (in the sense of the comparison operator defined in
+        parmepy.mpi.topology.Cartesian)
+        or it creates a new topology and registers it in the topology list.
+        @param discretization : a parmepy.tools.parameters.Discretization
+        with:
+        - resolution = number of points in the domain
+        in each direction. We assume that the first point corresponds
+        to the origin, and the last point to the boundary point,
+        whatever the boundary type is.
+        That is x[0] = domain.origin and
+        x[resolution - 1] = domain.origin + domain.length.
+        - ghosts = number of points in the ghost layer
+        @param dim : dimension of the topology
+        @param mpi_params : a parmepy.tools.parameters.MPI_params, with:
+        - comm : MPI communicator used to create this topology
+         (default = main_comm)
+        - task_id : id of the task that owns this topology.
+        @param cutdir : array of bool, set cutdir[dir] = True if you want
+        to distribute data through direction dir.
+        @param shape : topology resolution
+        (i.e process layout in each direction).
         @return the required topology.
         """
-        if topoResolution is None:
-            newTopo = Cartesian(self, topoDim, gridResolution, ghosts=ghosts,
-                                comm=comm)
-        elif precomputed:
-            newTopo = Cartesian.withPrecomputedResolution(self, topoResolution,
-                                                          gridResolution,
-                                                          localres=localres,
-                                                          offset=offset,
-                                                          ghosts=ghosts,
-                                                          comm=comm)
+        # set task number
+        tid = self.currentTask()
+        if mpi_params is None:
+            mpi_params = MPI_params(comm=self.comm_task, task_id=tid)
+        else:
+            msg = 'Trying to create a topology on a process that does not'
+            msg += ' belong to the current task.'
+            assert mpi_params.task_id == tid, msg
+        newTopo = Cartesian(self, discretization, dim=dim,
+                            mpi_params=mpi_params, shape=shape,
+                            cutdir=cutdir)
+        newid = newTopo.get_id()
+        return self.topologies[newid]
+
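+    # Sketch, mirroring the unit tests; Discretization comes from
+    # parmepy.tools.parameters:
+    #   d3d = Discretization([33, 33, 17], [2, 2, 2])
+    #   topo = dom.create_topology(discretization=d3d, dim=2)
+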
+    def create_plane_topology_from_mesh(self, localres, global_start,
+                                        cdir=None, **kwds):
+        """
+        Create or return an existing parmepy.mpi.topology.
 
+        Define a 'plane' (1D) topology for a given mesh resolution.
+        This function is to be used when topo/discretization features
+        come from an external routine (e.g. scales or fftw)
+        @param localres : local mesh resolution
+        @param global_start : global indices of the lowest point
+        of the local mesh.
+        @param cdir : direction of cutting (i.e. normal to mpi planes)
+        default = last if fortran order, first if C order.
+        """
+        tid = self.currentTask()
+        if 'mpi_params' not in kwds:
+            kwds['mpi_params'] = MPI_params(comm=self.comm_task, task_id=tid)
         else:
-            if fixedResolution:
-                topoCreation = Cartesian.withResolutionFixed
-            else:
-                topoCreation = Cartesian.withResolution
-            newTopo = topoCreation(self, shape=topoResolution,
-                                   globalMeshResolution=gridResolution,
-                                   ghosts=ghosts, comm=comm)
-        newid = newTopo.getId()
+            msg = 'Trying to create a topology on a process that does not'
+            msg += ' belong to the current task.'
+            assert kwds['mpi_params'].task_id == tid, msg
+        newTopo = Cartesian.plane_precomputed(localres, global_start, cdir,
+                                              domain=self, **kwds)
+
+        newid = newTopo.get_id()
         return self.topologies[newid]
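+    # Sketch, mirroring test_topo_from_mesh: the local resolution and
+    # start indices come from an external solver (fftw via parmepy.f2py):
+    #   localres, global_start = fftw2py.init_fftw_solver(
+    #       d3d.resolution, dom.length, comm=comm.py2f())
+    #   topo = dom.create_plane_topology_from_mesh(
+    #       localres=localres, global_start=global_start,
+    #       discretization=d3d)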
 
-    def checkTopo(self, newTopo):
+    def _checkTopo(self, newTopo):
         """
         Return the id of the input topology
-        if it exists in the present domain list, else return -1.
+        if it exists in the domain list, else return -1.
         @param newTopo : the topology  to check
         @return id of the topology if present else -1.
         """
         otherid = -1
         for top in self.topologies.values():
             if newTopo == top:
-                otherid = top.getId()
+                otherid = top.get_id()
                 break
         return otherid
 
@@ -94,17 +187,15 @@ class Domain(object):
         @return the id of the new registered topology
         or of the corresponding 'old one' if it already exists.
         """
-        otherid = self.checkTopo(newTopo)
+        otherid = self._checkTopo(newTopo)
         if otherid < 0:
-            self.topologies[newTopo.getId()] = newTopo
-            newTopo.setUp()
+            self.topologies[newTopo.get_id()] = newTopo
             newTopo.isNew = True
+            newid = newTopo.get_id()
         else:
             # No registration
-            newTopo = self.topologies[otherid]
             newTopo.isNew = False
-
-        newid = newTopo.getId()
+            newid = otherid
         return newid
 
     def remove(self, topo):
@@ -117,21 +208,37 @@ class Domain(object):
         @return either the id of the removed topology
         or -1 if nothing is done.
         """
-        otherid = self.checkTopo(topo)
+        otherid = self._checkTopo(topo)
         if otherid >= 0:
             self.topologies.pop(otherid)
         return otherid
 
-    def reset(self):
-        for topo in self.topologies.values():
-            del topo
-        self.topologies = {}
-
     def printTopologies(self):
         """
         Print all topologies of the domain.
         """
-        from parmepy.mpi import main_rank
         if main_rank == 0:
             for topo in self.topologies.values():
-                print (topo)
+                print topo
+
+    @abstractmethod
+    def __eq__(self, other):
+        """
+        Comparison of two domains
+        """
+
+    def __ne__(self, other):
+        """
+        Not equal operator
+        """
+        result = self.__eq__(other)
+        if result is NotImplemented:
+            return result
+        return not result
+
+    def i_periodic_boundaries(self):
+        """
+        @return list of directions where
+        boundaries are periodic
+        """
+        return np.where(self.boundaries == PERIODIC)[0]
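+
+    # e.g. for a 3D box with all-periodic boundaries,
+    # i_periodic_boundaries() returns array([0, 1, 2]).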
diff --git a/HySoP/hysop/domain/domain.pyc b/HySoP/hysop/domain/domain.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..69cabd2b4cc0517e752ac9ed73cb2a79a413571d
Binary files /dev/null and b/HySoP/hysop/domain/domain.pyc differ
diff --git a/HySoP/hysop/domain/obstacle/__init__.py b/HySoP/hysop/domain/obstacle/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..be5911bbcc91dc597c5c18e7479042e779dfd672
--- /dev/null
+++ b/HySoP/hysop/domain/obstacle/__init__.py
@@ -0,0 +1,57 @@
+## @package parmepy.domain.obstacle
+# Obstacles description (geometry).
+#
+#
+# An 'obstacle' is the description of a sub-domain
+# of the main physical domain, given by one or more user-defined
+# functions of the space coordinates.
+#
+# What for? \n
+# Mainly to provide some 'index sets' to the penalization operator.\n
+# For a given obstacle, and a given discrete field, we must be able to call:
+# \code
+# cond = obstacle.discretize(topo)[0]
+# field[cond] = 1.9
+# \endcode
+# This example means that the field (discretized on topology topo)
+# will be set to 1.9 everywhere inside the obstacle.
+#
+# 
+# Obviously the index sets will depend on the discretization
+# of the domain (i.e. on the underlying topology).
+# So each obstacle handles a dictionary of boolean arrays. The keys
+# of the dictionary are the topologies and the values are boolean arrays
+# whose values are true on and inside the object and false outside.
+# \code
+# obstacle.ind[topo] = someBooleanArray
+# \endcode
+# Each component of the dictionary is created using the method 'discretize':
+# \code
+# # Create the boolean array that represents the obstacle for topology topo:
+# someBooleanArray = obstacle.discretize(topo)[0]
+# # So for a field already discretized on topo, we may call
+# field[someBooleanArray]
+# \endcode
+#
+# A more complete example : initialize a scalar field with one inside a sphere
+# and zero everywhere else.
+# 
+# \code
+# Lx = Ly = Lz = 2
+# dom = pp.Box(dimension=3, length=[Lx, Ly, Lz], origin=[-1., -1., -1.])
+# # Definition of a sphere in the middle of the domain
+# sphere = Sphere(dom, position=[0., 0., 0.], radius=0.5)
+# # A topology, built from a Discretization (parmepy.tools.parameters)
+# topo = Cartesian(dom, Discretization([33, 33, 33]))
+# # A scalar field on the domain :
+# scal = pp.Field(domain=dom, name='Scalar')
+# # Discretization on topo:
+# scal_discr = scal.discretize(topo)
+# # scal set to 1. inside the obstacle:
+# condition = sphere.discretize(topo)[0]
+# scal.discreteFields[topo][condition] = 1.
+# # equivalent to
+# scal_discr[condition] = 1.
+# # to set a value everywhere except in the sphere
+# scal_discr[~condition] = 8.
+# \endcode
diff --git a/HySoP/hysop/domain/obstacle/controlBox.py b/HySoP/hysop/domain/obstacle/controlBox.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb24a8442dfe067c5c11c61c912fd541772b4b24
--- /dev/null
+++ b/HySoP/hysop/domain/obstacle/controlBox.py
@@ -0,0 +1,301 @@
+"""
+@file controlBox.py
+Define a sub-domain with a box-liked shape.
+"""
+from parmepy.domain.obstacle.obstacle import Obstacle
+from parmepy.domain.obstacle.planes import SubSpace, SubPlane
+from parmepy.mpi.mesh import SubMesh
+import numpy as np
+import parmepy.tools.numpywrappers as npw
+from parmepy.tools.parameters import Discretization, MPI_params
+
+
+class ControlBox(Obstacle):
+    """
+    Build a box-shaped sub-domain:
+    defines the set of indices inside this domain ('ind' member)
+    and the sets of indices on the surfaces of this domain ('slices').
+    Useful to define a control volume to perform integration.
+    See for example parmepy.operator.monitor.forces
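+
+    Usage sketch (assuming an existing box 'dom', a topology 'topo'
+    and a continuous field 'velo'):
+    \code
+    cb = ControlBox(domain=dom, origin=[0., 0., 0.], lengths=[.5, .5, .5])
+    cb.discretize(topo)
+    w = cb.integrate(velo, topo)
+    \endcode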
+    """
+
+    def __init__(self, origin, lengths, **kwds):
+        """
+        Build the volume of control
+        @param origin : coordinates of the lowest point in the sub-domain
+        @param lengths : lengths of box sides.
+        """
+        super(ControlBox, self).__init__(**kwds)
+
+        ## Lowest point of the box
+        self.origin = npw.asrealarray(origin)
+        ## Box's sides dimension
+        self.lengths = npw.asrealarray(lengths)
+        ## Dictionary of local meshes, such that
+        ## mesh[topo] is the restriction of topo.mesh
+        ## to the current control box.
+        self.mesh = {}
+        self.upper = None
+        self.lower = None
+        self.upperS = None
+        self.lowerS = None
+        self.slices = {}
+        self.indReduced = {}
+        self._boxCreated = False
+        ## Check if the defined box contains points
+        ## for a given topology. self.isEmpty[topo] = False
+        ## if some grid points are inside the box on
+        ## the current processor for topo discretization.
+        self.isEmpty = {}
+        ## Dict of local coordinates for a given topology
+        self.coords = {}
+        ## Global resolution of the obstacle (plane, sub space ...)
+        ## For the time being, it is computed only for subspaces, by
+        ## calling the globalResolution method.
+        self.gRes = None
+        ## Global index in the original topology of the 'lowest' point
+        ## of the obstacle. Only for subspaces.
+        self.gstart = None
+
+    def createVolumeAndSides(self, spaceStep):
+        """
+        @param[in] spaceStep : array of size self._dim, space step size
+        in each direction.
+        This value will be used to compute a tolerance and detect
+        points inside the box.
+        """
+        # Build Half-spaces indices list in all directions
+        normalUp = np.identity(self._dim)
+        normalDown = np.identity(self._dim) * -1
+        pointsUp = npw.zeros((self._dim, self._dim))
+        # Control box will be used for integration, so we remove the
+        # last point in the grid.
+        boxlengths = self.lengths - spaceStep
+        tol = spaceStep * 0.5
+
+        for i in xrange(self._dim):
+            pointsUp[:, i] = self.origin
+        pointsUp.flat[::self._dim + 1] += self.lengths
+        # -- Control volume : union of two halfspaces --
+        if self.upper is None:
+            self.upper = [SubSpace(domain=self.domain, normal=normalUp[:, i],
+                                   point=pointsUp[:, i],
+                                   lengths=boxlengths,
+                                   epsilon=tol[i])
+                          for i in xrange(self._dim)]
+        if self.lower is None:
+            self.lower = [SubSpace(domain=self.domain, normal=normalDown[:, i],
+                                   point=self.origin, lengths=boxlengths,
+                                   epsilon=tol[i])
+                          for i in xrange(self._dim)]
+
+        # Create objects to describe the sides of the box
+        if self.upperS is None:
+            self.upperS = [SubPlane(domain=self.domain, normal=normalUp[:, i],
+                                    point=pointsUp[:, i],
+                                    lengths=boxlengths,
+                                    epsilon=tol[i])
+                           for i in xrange(self._dim)]
+
+        if self.lowerS is None:
+            self.lowerS = [SubPlane(domain=self.domain,
+                                    normal=normalDown[:, i],
+                                    point=self.origin, lengths=boxlengths,
+                                    epsilon=tol[i])
+                           for i in xrange(self._dim)]
+        self._boxCreated = True
+
+    def discretize(self, topo):
+        """
+        Discretize the box volume and its surfaces.
+        @param topo : the topology that describes the discretization.
+        """
+        # Check if already done. If so, this function has no effect.
+        if topo not in self.ind.keys():
+            spaceStep = topo.mesh.space_step
+            # -- Control volume : union of two halfspaces --
+            if not self._boxCreated:
+                self.createVolumeAndSides(spaceStep)
+
+            # Discretize all volume and surfaces of
+            # the box for topo
+            for i in xrange(self._dim):
+                self.lower[i].discretize(topo)
+                self.upper[i].discretize(topo)
+                self.lowerS[i].discretize(topo)
+                self.upperS[i].discretize(topo)
+
+            # 1 -- Compute list of indices inside the box,
+            #  for topo --> ind[topo]
+            self.ind[topo] = []
+
+            self.ind[topo].append(np.logical_and(self.upper[0].ind[topo][0],
+                                                 self.lower[0].ind[topo][0]))
+            for i in xrange(1, self._dim):
+                cond = np.logical_and(self.upper[i].ind[topo][0],
+                                      self.lower[i].ind[topo][0])
+                self.ind[topo][0] = np.logical_and(self.ind[topo][0], cond)
+
+            ind = np.where(self.ind[topo][0])
+
+            # 2 -- Convert ind[topo] (array of bool) to slices
+            # which may be more convenient for computations
+            # --> slices[topo]
+            # + mesh[topo], a parmepy.mpi.SubMesh, useful
+            # to get local coordinates and so on
+            if ind[0].size == 0:
+                self.slices[topo] = [slice(0, 0) for i in xrange(self._dim)]
+                self.mesh[topo] = None
+                self.isEmpty[topo] = True
+            else:
+                self.isEmpty[topo] = False
+                ic = topo.mesh.iCompute
+                lstart = [ind[i].min() if ind[i].size > 0 else None
+                          for i in xrange(self._dim)]
+                lstart = npw.asintarray([max(lstart[i], ic[i].start)
+                                         for i in xrange(self._dim)])
+                end = [ind[i].max() for i in xrange(self._dim)]
+                end = npw.asintarray([min(end[i], ic[i].stop - 1)
+                                      for i in xrange(self._dim)])
+                # slice(start,end) --> end not included, so +1
+                end += 1
+                resol = end - lstart + 2 * topo.mesh.discretization.ghosts
+                gstart = lstart + topo.mesh.global_start
+                gstart -= topo.mesh.discretization.ghosts
+                self.mesh[topo] = SubMesh(self.domain,
+                                          topo.mesh.discretization,
+                                          gstart, resol)
+                self.slices[topo] = [slice(lstart[i], end[i])
+                                     for i in xrange(self._dim)]
+                coords = []
+                for i in xrange(self._dim):
+                    cc = topo.mesh.coords[i].flat[self.slices[topo][i]]
+                    coords.append(cc)
+                coords = tuple(coords)
+                self.coords[topo] = np.ix_(*coords)
+
+                # --> self.ind[topo][0] components are True
+                # for points inside the volume
+                # --> self.slices[topo] represent the same thing
+                # but using slices of numpy arrays.
+                # Usage (vd being a numpy array discretized
+                # on the whole domain, cb a control box):
+                # # Set values to all points inside the control box
+                # vd[cb.ind[topo][0]] = ...
+                # # Get a sub-array of vd representing the control box
+                # # and use it
+                # result[...] = vd[cb.slices] + ...
+                # The important difference between slices and ind is:
+                # 1 - vd[ind] returns a 1D array whatever the shape of vd.
+                # 2 - vd[slices] returns an array of the same dim as vd,
+                # with shape given by slices.
+
+            return self.ind[topo]
+
+    def sub(self, obstacle, topo):
+        """
+        Remove all points corresponding to the input obstacle from
+        the current control box
+        """
+        if topo not in self.indReduced.keys():
+            obstacle.discretize(topo)
+            self.discretize(topo)
+            self.indReduced[topo] = []
+            # Warning : obstacle may have several layers
+            cond = obstacle.ind[topo][0]
+            for i in xrange(1, len(obstacle.ind[topo])):
+                cond = npw.asboolarray(np.logical_or(cond,
+                                                     obstacle.ind[topo][i]))
+            cond = np.logical_not(cond)
+            self.indReduced[topo].append(np.logical_and(self.ind[topo][0],
+                                                        cond))
+        return self.indReduced[topo][-1]
+
+    def integrate_on_proc(self, field, topo, useSlice=True, component=0):
+        """
+        Integrate field over the box, on the local (process) sub-mesh.
+        """
+        if useSlice:
+            cond = self.slices[topo]
+        else:
+            iC = topo.mesh.iCompute
+            cond = self.ind[topo][0][iC]
+        dvol = npw.prod(topo.mesh.space_step)
+        result = npw.real_sum(field.discretize(topo)[component][cond])
+        result *= dvol
+        return result
+
+    def integrate(self, field, topo, useSlice=True,
+                  component=0, root=0, mpiall=True):
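+        """
+        Integrate field over the box and reduce the result over
+        topo.comm (allreduce if mpiall is True, else reduce to root).
+        """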
+        res = self.integrate_on_proc(field, topo, useSlice, component)
+        if mpiall:
+            return topo.comm.allreduce(res)
+        else:
+            return topo.comm.reduce(res, root=root)
+
+    def integrateOnSurface(self, field, topo, normalDir=0, up=True,
+                           useSlice=True, component=0, root=0, mpiall=True):
+        """
+        Integrate field on the top (if up is True) or bottom surface
+        normal to direction normalDir, reduced over topo.comm.
+        """
+        res = self.integrateOnSurf_proc(field, topo, normalDir, up, useSlice,
+                                        component)
+        if mpiall:
+            return topo.comm.allreduce(res)
+        else:
+            return topo.comm.reduce(res, root=root)
+
+    def integrateOnSurf_proc(self, field, topo, normalDir=0,
+                             up=True, useSlice=True, component=0):
+        """
+        Integrate field on the top (if up is True) or bottom surface
+        normal to a direction, on the local (process) sub-mesh.
+        """
+        if up:
+            surf = self.upperS[normalDir]
+        else:
+            surf = self.lowerS[normalDir]
+        if useSlice:
+            cond = surf.slices[topo]
+        else:
+            iC = topo.mesh.iCompute
+            cond = surf.ind[topo][0][iC]
+        dirs = np.logical_not(np.arange(self._dim) == normalDir)
+        dS = npw.prod(topo.mesh.space_step[dirs])
+        result = npw.real_sum(field.discretize(topo)[component][cond])
+        result *= dS
+        return result
+
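+    # e.g. flux of a field component through the upper x-surface (sketch):
+    #   fx = cb.integrateOnSurface(field, topo, normalDir=0, up=True)
+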
+    def globalResolution(self, parent_topo):
+        """
+        Compute the 'global resolution' of the control box.
+        """
+        # We create a fake topology, with only one proc,
+        # to get the global resolution of the box.
+        # This could also be done by summing local computations,
+        # but that would require a lot of communications.
+        if parent_topo.rank == 0:
+            color = 0
+        else:
+            color = 1
+        subcomm = parent_topo.comm.Split(color)
+        dimension = self.domain.dimension
+        tmp = None
+        if parent_topo.rank == 0:
+            resolution = parent_topo.globalMeshResolution
+            ghosts = parent_topo.mesh.discretization.ghosts
+            # getOrCreateTopology no longer exists; sketch assuming the
+            # new create_topology API and the parameters wrappers.
+            d = Discretization(resolution, ghosts)
+            mp = MPI_params(comm=subcomm, task_id=self.domain.currentTask())
+            topo = self.domain.create_topology(d, dim=3, mpi_params=mp)
+            self.discretize(topo)
+            sl = self.slices[topo]
+            self.gRes = [sl[i].stop - sl[i].start for i in xrange(dimension)]
+            self.gstart = [sl[i].start for i in xrange(dimension)]
+            # if the topology has been created just to
+            # get the global resolution, we can remove it
+            if topo.isNew:
+                self.domain.remove(topo)
+                self.slices.pop(topo)
+                self.ind.pop(topo)
+            tmp = self.gRes + self.gstart
+        tmp = parent_topo.comm.bcast(tmp)
+        self.gRes = tmp[:dimension]
+        self.gstart = tmp[dimension:]
+        return self.gRes
diff --git a/HySoP/hysop/domain/obstacle/disk.py b/HySoP/hysop/domain/obstacle/disk.py
new file mode 100644
index 0000000000000000000000000000000000000000..f57cbcc9284fac1aeab3a38190cc1b8f8490694e
--- /dev/null
+++ b/HySoP/hysop/domain/obstacle/disk.py
@@ -0,0 +1,64 @@
+"""
+@file disk.py
+Rigid disk (2D)
+"""
+from parmepy.domain.obstacle.sphere import Sphere, HemiSphere
+import numpy as np
+import parmepy.tools.numpywrappers as npw
+
+
+class Disk(Sphere):
+    """
+    Disk in a 2D domain.
+    """
+
+    def __init__(self, **kwds):
+        """
+        Description of a disk in a domain.
+        @param domain : the physical domain that contains the disk.
+        @param position : position of the center
+        @param radius : disk radius, default = 1
+        @param porousLayers : a list of thicknesses
+        for successive porous layers;
+        radius is the inside disk radius and thicknesses are given from
+        the inside layer to the outside one.
+        @param vd : velocity of the disk (considered as a rigid body),
+        default = 0.
+        """
+        super(Disk, self).__init__(**kwds)
+        assert self.domain.dimension == 2
+
+        def dist(x, y, R):
+            return npw.asarray(np.sqrt((x - self.position[0]) ** 2
+                                       + (y - self.position[1]) ** 2) - R)
+        self.chi = [dist]
+
+
+class HalfDisk(HemiSphere):
+    """
+    Half disk in a 2D domain.
+    """
+    def __init__(self, **kwds):
+        """
+        Constructor for the half-disk.
+        @param domain : the physical domain that contains the half-disk.
+        @param position : position of the center
+        @param radius : disk radius, default = 1
+        @param vd : velocity of the disk (considered as a rigid body),
+        default = 0.
+        """
+        super(HalfDisk, self).__init__(**kwds)
+        assert self.domain.dimension == 2
+
+        def dist(x, y, R):
+            """Signed distance to the circle of radius R."""
+            return npw.asarray(np.sqrt((x - self.position[0]) ** 2
+                                       + (y - self.position[1]) ** 2) - R)
+        self.chi = [dist]
+
+        def LeftBox(x, y):
+            return x - self.position[0]
+
+        self.LeftBox = LeftBox
diff --git a/HySoP/hysop/domain/obstacle/obstacle.py b/HySoP/hysop/domain/obstacle/obstacle.py
new file mode 100644
index 0000000000000000000000000000000000000000..c052ed7aa218ec544d66958a060d55611592a888
--- /dev/null
+++ b/HySoP/hysop/domain/obstacle/obstacle.py
@@ -0,0 +1,96 @@
+"""@file obstacle.py
+
+General interface to define a new geometry
+inside a domain (sphere, control box ...)
+"""
+import numpy as np
+
+
+class Obstacle(object):
+    """Ddescription of a physical obstacle.
+    An obstacle is the geometrical description of
+    a physical sub-domain.
+    """
+    def __init__(self, domain, formula=None, vd=0.0):
+        """ Constructor
+        @param domain : the domain that contains this obstacle.
+        @param formula : a list of functions that describe
+        the geometry of the obstacle.
+        @param vd : velocity of the obstacle (considered as a rigid body),
+        default = 0.
+        """
+        ## Domain.
+        self.domain = domain
+        from parmepy.domain.box import Box
+        assert isinstance(domain, Box),\
+            'Obstacle only implemented for box-like domains'
+        ## Obstacle dimension.
+        self._dim = domain.dimension
+        ## A list of functions that describe the geometry of the obstacle.
+        ## see parmepy.domain.obstacle.
+        self.chi = []
+        if formula is not None:
+            if isinstance(formula, list):
+                for func in formula:
+                    self.chi.append(np.vectorize(func))
+            else:
+                self.chi = [np.vectorize(formula)]
+        ## A dictionary of lists of indices ...
+        ## ind[topo][i] represents the set of points of the domain
+        ## discretized with topo that are in the area defined with chi[i].
+        self.ind = {}
+        ## Velocity of the center of mass of the obstacle
+        ## (considered as a rigid body)
+        self.vd = vd
+        ## Check if some grid points are present inside the current object
+        ## for the current mpi proc. If not, isEmpty[topo] = True.
+        self.isEmpty = {}
+
+    def discretize(self, topo):
+        """
+        For a given topology, compute the list of points in the domain
+        that belong to the obstacle.
+        Add a new entry into self.ind.
+        @param topo : topology specification for discretization
+        @return an array of bool such that array[i,j,k] = True if
+        point(i,j,k) is in the obstacle.
+
+        Note FP : there are two ways to 'save' which points are
+        in the obstacle : either we set a test function and
+        fill a boolean numpy array (case A) or we compute domain.dimension
+        lists of indices (case B).
+        - case A : indices[topo] is a numpy array of the same
+        size as topo.mesh.
+         +++ : very fast to compute, very fast to apply
+         --- : needs more memory (size of bool * topo.mesh.size)
+        - case B : indices[topo] is a tuple of lists, like (ix, iy, iz).
+        A point inside the obstacle is thus given
+        by the indices ix[i], iy[i], iz[i]
+         +++ : needs less memory
+         --- : very slow to compute/apply compared to case A
+        Default choice = case A
+        \todo : provide a way for user to choose between case A and B.
+        Note FP 2: maybe that would be better to save indices in
+        operator (penalization) and not in obstacle, to save memory?
+        """
+        # first check if we have already computed indices for
+        # this topology
+        if topo not in self.ind.keys():
+            self.ind[topo] = []
+            # apply each indicator function on the local mesh of topo
+            for func in self.chi:
+                self.ind[topo].append(func(*topo.mesh.coords) <= 0)
+
+        return self.ind[topo]
+
+    def _isempty(self, topo):
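+        """Set self.isEmpty[topo] to True if no grid point of the local
+        mesh lies inside the obstacle, False otherwise."""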
+        ilist = np.where(self.ind[topo])
+        if ilist[0].size == 0:
+            self.isEmpty[topo] = True
+        else:
+            self.isEmpty[topo] = False
+
diff --git a/HySoP/hysop/domain/obstacle/planes.py b/HySoP/hysop/domain/obstacle/planes.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5f2edbdb29bec969222b413441d27be727708fa
--- /dev/null
+++ b/HySoP/hysop/domain/obstacle/planes.py
@@ -0,0 +1,359 @@
+"""
+@file planes.py
+Plate-like sub-domains at boundaries, normal
+to a given direction.
+"""
+from parmepy.domain.obstacle.obstacle import Obstacle
+import numpy as np
+import parmepy.tools.numpywrappers as npw
+from parmepy.tools.parameters import Discretization, MPI_params
+
+
+class HalfSpace(Obstacle):
+    """
+    Divide domain into two sub-spaces, on each side of a plane
+    defined by its normal and a point.
+    Indices in self.ind describe the half-space below the plane,
+    'normal' being the outward normal of the plane.
+    """
+
+    def __init__(self, normal, point, epsilon=1e-2, **kwds):
+        """
+        Half-space defined by the points of the domain on one side
+        of a plane.
+        @param domain : the physical domain that contains the plane
+        @param normal : outward normal
+        @param point : coordinates of a point of the plane.
+        @param epsilon : tolerance
+        """
+        super(HalfSpace, self).__init__(**kwds)
+        assert epsilon > 0.0, 'Tolerance value must be positive'
+        ## Tolerance used to decide whether points near the boundary
+        ## belong to the subspace. A good choice may be space_step / 2.
+        self.epsilon = epsilon
+        ## Outward normal of the plane (0:x, 1:y, 2:z);
+        ## normal is the 'outer' normal of the 'in' subspace.
+        self.normal = npw.asintarray(normal)
+        self.point = point
+        self.origin = npw.asrealarray(point)
+
+        def Outside(*coords):
+            return sum([(coords[i] - self.point[i]) * self.normal[i]
+                        for i in xrange(self.domain.dimension)])
+
+        ## Test function for half-space.
+        ## Positive value if outside subdomain else negative
+        self.chi = [Outside]
+        self.slices = {}
+        ## Global resolution of the obstacle (plane, sub space ...)
+        ## At the time, it's computed only for subspaces, by calling
+        ## globalResolution method.
+        self.gRes = None
+        ## Global index in the original topology of the 'lowest' point
+        ## of the obstacle. Only for subspaces.
+        self.gstart = None
+
+    def discretize(self, topo):
+        # first check if we have already computed indices for
+        # this topology
+
+        if topo not in self.ind.keys():
+            self.ind[topo] = []
+            # apply indicator function on topo local mesh
+            cond = npw.asarray(self.chi[0](*topo.mesh.coords) <= self.epsilon)
+            self.ind[topo].append(cond)
+            self._isempty(topo)
+        return self.ind[topo]
+
+    def __str__(self):
+        s = 'Plane normal to vector ' + str(self.normal)
+        s += ' going through point ' + str(self.point)
+        return s
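+
+    # e.g. HalfSpace(domain=dom, normal=[1, 0, 0], point=[0., 0., 0.])
+    # selects the grid points with x <= epsilon.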
+
+
+class Plane(HalfSpace):
+    """
+    A plane in the domain, defined by its normal and a point.
+    Indices of plane.ind describe the points belonging to the plane.
+    """
+    def discretize(self, topo):
+        # first check if we have already computed indices for
+        # this topology
+
+        if topo not in self.ind.keys():
+            self.ind[topo] = []
+            # apply indicator function on topo local mesh
+            cond = npw.abs(self.chi[0](*topo.mesh.coords)) < self.epsilon
+            self.ind[topo].append(cond)
+            self._isempty(topo)
+
+            # assert that the plane is a real surface, i.e.
+            # only one value for coords[normalDir].
+            # The expr is a bit tricky but it works ...
+            ndir = np.where(self.normal != 0)[0][0]
+            assert assertSubPlane(ndir, self.ind[topo][0], *topo.mesh.coords),\
+                'Your plane is not a surface but a volume.\
+                Please reduce epsilon value.'
+
+        return self.ind[topo]
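+
+    # e.g. Plane(domain=dom, normal=[0, 0, 1], point=[0., 0., 0.])
+    # keeps only the points with |z| < epsilon, i.e. the plane z = 0.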
+
+
+class SubSpace(HalfSpace):
+    """
+    Define a rectangular space in a plane normal to one
+    coord. axis and the subspace below this surface.
+    'Below' = direction opposite to the outward normal of the plane
+    (input param)
+    """
+    def __init__(self, lengths, **kwds):
+        """
+        @param domain : the physical domain that contains the space
+        @param normal : outward normal
+        @param point : coordinates of a point of the plane.
+        @param lengths : lengths of the subplane
+        @param epsilon : tolerance
+        @param vd : velocity of the obstacle (considered as a rigid body),
+        default = 0.
+        """
+        super(SubSpace, self).__init__(**kwds)
+
+        def dist(cdir, val, *coords):
+            return coords[cdir] - val
+
+        self.dist = dist
+        self.max = self.origin + npw.asrealarray(lengths)
+        self.lengths = npw.asrealarray(lengths)
+        ndir = np.where(self.normal != 0)[0][0]
+        if self.normal[ndir] > 0:
+            self.max[ndir] = self.origin[ndir]
+        elif self.normal[ndir] < 0:
+            self.max[ndir] = self.domain.max[ndir]
+        # Only implemented for planes orthogonal to coord. axes
+        assert len(self.normal[self.normal == 0]) == self.domain.dimension - 1
+        self.coords = {}
+
+    def discretize(self, topo):
+        # first check if we have already computed indices for
+        # this topology
+        condMax = [0] * self.domain.dimension
+        condMin = [0] * self.domain.dimension
+        if topo not in self.ind.keys():
+            self.ind[topo] = []
+            # apply indicator function on topo local mesh
+            coords = topo.mesh.coords
+            cond = npw.asboolarray(self.chi[0](*coords) < self.epsilon)
+            indices = np.where(self.normal == 0)[0]
+
+            for i in indices:
+                condMax[i] = self.dist(i, self.max[i], *coords) < self.epsilon
+                condMin[i] = self.dist(i, self.origin[i], *coords) > - self.epsilon
+                condMin[i] = np.logical_and(condMax[i], condMin[i])
+                cond = npw.asarray(np.logical_and(cond, condMin[i]))
+
+            self.ind[topo].append(cond)
+            self._isempty(topo)
+        return self.ind[topo]
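+
+    # e.g. SubSpace(domain=dom, normal=[0, 1, 0], point=[0., 0., 0.],
+    #               lengths=lengths) selects the points with y <= 0,
+    # restricted to the given lengths in the other directions.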
+
+
+class SubPlane(SubSpace):
+    """
+    Define a rectangular surf in a plane normal to one
+    coord. axis.
+    """
+    def discretize(self, topo):
+        # first check if we have already computed indices for
+        # this topology
+        dim = self.domain.dimension
+        condMax = [0] * dim
+        condMin = [0] * dim
+        if topo not in self.ind.keys():
+            self.ind[topo] = []
+            # apply indicator function on topo local mesh
+            coords = topo.mesh.coords
+            cond = npw.abs(self.chi[0](*coords)) < self.epsilon
+            indices = np.where(self.normal == 0)[0]
+            for i in indices:
+                condMax[i] = self.dist(i, self.max[i], *coords) < self.epsilon
+                condMin[i] = self.dist(i, self.origin[i], *coords) > -self.epsilon
+                condMin[i] = np.logical_and(condMax[i], condMin[i])
+                cond = npw.asboolarray(np.logical_and(cond, condMin[i]))
+
+            self.ind[topo].append(cond)
+            ilist = np.where(cond)
+            if ilist[0].size == 0:
+                self.slices[topo] = [slice(0, 0) for i in xrange(dim)]
+                self.isEmpty[topo] = True
+            else:
+                self.isEmpty[topo] = False
+                start = [ilist[i].min() for i in xrange(dim)]
+                # Ghost points must not be included into surf. points
+                ic = topo.mesh.iCompute
+                start = [max(start[i], ic[i].start) for i in xrange(dim)]
+                end = [ilist[i].max() for i in xrange(dim)]
+                end = npw.asintarray([min(end[i], ic[i].stop - 1)
+                                      for i in xrange(dim)])
+                end += 1
+                ndir = np.where(self.normal != 0)[0][0]
+                end[ndir] = start[ndir] + 1
+                self.slices[topo] = [slice(start[i], end[i])
+                                     for i in xrange(dim)]
+                assert assertSubPlane(ndir, cond, *topo.mesh.coords),\
+                    'Your plane is not a surface but a volume.\
+                Please reduce epsilon value.'
+            subcoords = []
+            # !! Warning : slices will be used for integration,
+            # so the last point in each dir is not included.
+            # Same thing for coords.
+            for i in xrange(dim):
+                subcoords.append(coords[i].flat[self.slices[topo][i]])
+            subcoords = tuple(subcoords)
+            self.coords[topo] = np.ix_(*subcoords)
+        return self.ind[topo]
+
+    def globalResolution(self, parent_topo):
+        """
+        Compute 'global resolution' of the subplane
+        """
+        # We create a fake topology, with only one proc,
+        # to get the global resolution of the plane.
+        # This could also be done by summing local computations,
+        # but that would require a lot of communications.
+        if parent_topo.rank == 0:
+            color = 0
+        else:
+            color = 1
+        subcomm = parent_topo.comm.Split(color)
+        dimension = self.domain.dimension
+        tmp = None
+        if parent_topo.rank == 0:
+            resolution = parent_topo.globalMeshResolution
+            ghosts = parent_topo.mesh.discretization.ghosts
+            # getOrCreateTopology no longer exists; sketch assuming the
+            # new create_topology API and the parameters wrappers.
+            d = Discretization(resolution, ghosts)
+            mp = MPI_params(comm=subcomm, task_id=self.domain.currentTask())
+            topo = self.domain.create_topology(d, dim=3, mpi_params=mp)
+            self.discretize(topo)
+            sl = self.slices[topo]
+            self.gRes = [sl[i].stop - sl[i].start for i in xrange(dimension)]
+            self.gstart = [sl[i].start for i in xrange(dimension)]
+            # if the topology has been created just to
+            # get the global resolution, we can remove it
+            if topo.isNew:
+                self.domain.remove(topo)
+                self.slices.pop(topo)
+                self.ind.pop(topo)
+            tmp = self.gRes + self.gstart
+        tmp = parent_topo.comm.bcast(tmp)
+        self.gRes = tmp[:dimension]
+        self.gstart = tmp[dimension:]
+        return self.gRes
+
+
+class PlaneBoundaries(Obstacle):
+    """
+    Defines top and down (meaning for min and max value in
+    a given direction) planes at boundaries.
+    All points in the spaces above the top plane and below the down plane
+    will be included in the PlaneBoundaries list of indices.
+    Thickness of the top/down areas is given as an input param.
+    Example for z dir:
+    \f$ \{x,y,z\} \ for \ z_{max} - \epsilon \leq z \leq z_{max}
+    \ or \ z_{min} \leq z \leq z_{min} + \epsilon\f$
+    """
+
+    def __init__(self, normal_dir, thickness=0.1, **kwds):
+        """
+        Build top and down boundary areas in direction normal_dir.
+        @param domain : the physical domain that contains the planes.
+        @param normal_dir : direction normal to the boundary planes.
+        @param thickness : thickness of the boundary areas
+        @param vd : velocity of obstacle (considered as a rigid body),
+        default = 0.
+        """
+        super(PlaneBoundaries, self).__init__(**kwds)
+        assert thickness > 0.0, 'Plate thickness must be positive'
+        ## Thickness of the boundary areas
+        self.thickness = thickness
+        ## Outward normals of the boundary planes (0:x, 1:y, 2:z)
+        normalUp = np.zeros((self.domain.dimension))
+        normalUp[normal_dir] = -1
+        pointUp = npw.zeros((self.domain.dimension))
+        pointUp[normal_dir] = self.domain.max[normal_dir] - thickness
+        self.upper = HalfSpace(domain=self.domain, normal=normalUp, point=pointUp,
+                               epsilon=1e-3)
+        normalDown = np.zeros((self.domain.dimension))
+        normalDown[normal_dir] = 1
+        pointDown = npw.zeros((self.domain.dimension))
+        pointDown[normal_dir] = self.domain.origin[normal_dir] + thickness
+        self.lower = HalfSpace(domain=self.domain, normal=normalDown,
+                               point=pointDown, epsilon=1e-3)
+
+    def discretize(self, topo):
+        # first check if we have already computed indices for
+        # this topology
+
+        self.lower.discretize(topo)
+        self.upper.discretize(topo)
+        if topo not in self.ind.keys():
+            # Warning FP : ind[topo] must be a list to be coherent
+            # with sphere definition, where porous layers are allowed.
+            # todo if required : add porous layers for planes.
+            self.ind[topo] = []
+            self.ind[topo].append(np.logical_or(self.upper.ind[topo][0],
+                                                self.lower.ind[topo][0]))
+            self._isempty(topo)
+        return self.ind[topo]
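+
+    # e.g. PlaneBoundaries(domain=dom, normal_dir=2, thickness=0.1)
+    # tags the points with z within 'thickness' of z_min or z_max.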
+
+
+def assertSubPlane(ndir, ind, *coords):
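+    """Return True if the points selected by the boolean array 'ind'
+    share a single coordinate value along direction 'ndir', i.e. if the
+    selected set is a surface (3D) or a line (2D), not a volume."""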
+    dim = len(coords)
+    if dim == 2:
+        return assertline(ndir, ind, *coords)
+    elif dim == 3:
+        return assertsurface(ndir, ind, *coords)
+
+
+def assertsurface(nd, ind, *coords):
+
+    dim = len(coords)
+    shape = np.zeros(dim, dtype=np.int32)
+    shape[:] = [coords[i].shape[i] for i in xrange(dim)]
+    cshape = coords[nd].shape
+    if nd == 0:
+        return max([a.max() - a.min()
+                    for a in [coords[nd][ind[:, i, j]]
+                              for i in xrange(shape[1])
+                              for j in xrange(shape[2])
+                              if coords[nd][ind[:, i, j]].size
+                              > 0]] + [0]) == 0.
+    elif nd == 1:
+        return max([a.max() - a.min()
+                    for a in [coords[nd][ind[i, :, j].reshape(cshape)]
+                              for i in xrange(shape[0])
+                              for j in xrange(shape[2])
+                              if coords[nd][ind[i, :, j].reshape(cshape)].size
+                              > 0]] + [0]) == 0.
+
+    else:
+        return max([a.max() - a.min()
+                    for a in [coords[nd][ind[i, j, :].reshape(cshape)]
+                              for i in xrange(shape[0])
+                              for j in xrange(shape[1])
+                              if coords[nd][ind[i, j, :].reshape(cshape)].size
+                              > 0]] + [0]) == 0.
+
+
+def assertline(nd, ind, *coords):
+
+    dim = len(coords)
+    shape = np.zeros(dim, dtype=np.int32)
+    shape[:] = [coords[i].shape[i] for i in xrange(dim)]
+    cshape = coords[nd].shape
+    if nd == 0:
+        return max([a.max() - a.min()
+                    for a in [coords[nd][ind[:, i]]
+                              for i in xrange(shape[1])
+                              if coords[nd][ind[:, i]].size
+                              > 0]] + [0]) == 0.
+    elif nd == 1:
+        return max([a.max() - a.min()
+                    for a in [coords[nd][ind[i, :].reshape(cshape)]
+                              for i in xrange(shape[0])
+                              if coords[nd][ind[i, :].reshape(cshape)].size
+                              > 0]] + [0]) == 0.
diff --git a/HySoP/hysop/domain/obstacle/sphere.py b/HySoP/hysop/domain/obstacle/sphere.py
new file mode 100644
index 0000000000000000000000000000000000000000..1bb0d62c7f6ad0b9d8cb19803f1629a31e9f8702
--- /dev/null
+++ b/HySoP/hysop/domain/obstacle/sphere.py
@@ -0,0 +1,132 @@
+"""
+@file sphere.py
+Spherical or hemispherical sub-domain.
+"""
+from parmepy.domain.obstacle.obstacle import Obstacle
+import numpy as np
+import parmepy.tools.numpywrappers as npw
+
+
+class Sphere(Obstacle):
+    """
+    Spherical domain.
+    """
+
+    def __init__(self, position, radius=1.0, porousLayers=None, **kwds):
+        """
+        Description of a sphere in a domain.
+        @param domain : the physical domain that contains the sphere.
+        @param position : position of the center
+        @param radius : sphere radius, default = 1
+        @param porousLayers : a list of thicknesses
+        for successive porous layers;
+        radius is the inside sphere radius and thicknesses are given from
+        the inside layer to the outside one.
+        @param vd : velocity of the sphere (considered as a rigid body),
+        default = 0.
+        """
+        super(Sphere, self).__init__(**kwds)
+
+        ## Radius of the sphere
+        self.radius = radius
+        ## Center position
+        self.position = np.asarray(position)
+
+        def dist(x, y, z, R):
+            """Signed distance to the sphere of radius R."""
+            return npw.asarray(np.sqrt((x - self.position[0]) ** 2
+                                       + (y - self.position[1]) ** 2
+                                       + (z - self.position[2]) ** 2) - R)
+
+        self.chi = [dist]
+        ## List of thicknesses for porous layers
+        if porousLayers is None:
+            porousLayers = []
+        self.layers = porousLayers
+
+    def discretize(self, topo):
+        # first check if we have already computed indices for
+        # this topology
+
+        if topo not in self.ind.keys():
+            currentRadius = self.radius
+            self.ind[topo] = []
+            # First, internal sphere
+            args = (currentRadius,)
+            self.ind[topo].append(self.chi[0](*(topo.mesh.coords + args)) <= 0)
+            # Then each layer from inside to outside
+            # for each indicator function
+            for thickness in self.layers:
+                # apply indicator function on topo local mesh
+                args = (currentRadius,)
+                condA = self.chi[0](*(topo.mesh.coords + args)) > 0
+                args = (currentRadius + thickness,)
+                condB = self.chi[0](*(topo.mesh.coords + args)) <= 0
+                self.ind[topo].append(np.logical_and(condA, condB))
+                # update current radius
+                currentRadius = currentRadius + thickness
+            self._isempty(topo)
+        return self.ind[topo]
+
+    def __str__(self):
+        """ToString method"""
+        s = self.__class__.__name__ + ' of radius ' + str(self.radius)
+        s += ' and center position ' + str(self.position)
+        return s
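+
+    # e.g. (see test_obstacle.py) a sphere with one porous layer:
+    #   s = Sphere(domain=dom, position=[0., 0., 0.], radius=0.3,
+    #              porousLayers=[0.13])
+    #   s.discretize(topo)
+    #   inner = s.ind[topo][0]  # bool array: inside the inner sphere
+    #   layer = s.ind[topo][1]  # bool array: the porous layer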
+
+
+class HemiSphere(Sphere):
+    """
+    HemiSpherical domain.
+    Area defined by the intersection of a sphere and the volume where
+    x < xs for xs == x position of the center of the sphere.
+    """
+    def __init__(self, **kwds):
+        """
+        Description of a hemisphere in a domain.
+        @param domain : the physical domain that contains the sphere.
+        @param position : position of the center
+        @param radius : sphere radius, default = 1
+        @param porousLayers : a list of thicknesses
+        for successive porous layers;
+        radius is the inside sphere radius and thicknesses are given from
+        the inside layer to the outside one.
+        @param vd : velocity of the sphere (considered as a rigid body),
+        default = 0.
+        """
+        super(HemiSphere, self).__init__(**kwds)
+
+        def LeftBox(x, y, z):
+            return x - self.position[0]
+        self.LeftBox = LeftBox
+
+    def discretize(self, topo):
+        # first check if we have already computed indices for
+        # this topology
+        if topo not in self.ind.keys():
+            currentRadius = self.radius
+            self.ind[topo] = []
+            # check if we are in the left half-box
+            cond0 = self.LeftBox(*(topo.mesh.coords)) <= 0
+            # First, internal sphere
+            args = (currentRadius,)
+            condA = self.chi[0](*(topo.mesh.coords + args)) <= 0
+            self.ind[topo].append(np.logical_and(condA, cond0))
+            # Then each layer from inside to outside
+            # for each indicator function
+            for thickness in self.layers:
+                # apply indicator function on topo local mesh
+                args = (currentRadius,)
+                condA = self.chi[0](*(topo.mesh.coords + args)) > 0
+                args = (currentRadius + thickness,)
+                condB = self.chi[0](*(topo.mesh.coords + args)) <= 0
+                np.logical_and(condA, condB, condA)
+                np.logical_and(condA, cond0, condA)
+                condA = npw.asarray(condA)
+                self.ind[topo].append(condA)
+                # update current radius
+                currentRadius = currentRadius + thickness
+            self._isempty(topo)
+
+        return self.ind[topo]
diff --git a/HySoP/hysop/domain/tests/parmesfftw.log b/HySoP/hysop/domain/tests/parmesfftw.log
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/HySoP/hysop/domain/tests/test_box.py b/HySoP/hysop/domain/tests/test_box.py
index 52e971a88df67c48cb5a45b6e6ae6dc05817d729..ecdd1f95b95395be95ae590aab8fb2c25877507d 100644
--- a/HySoP/hysop/domain/tests/test_box.py
+++ b/HySoP/hysop/domain/tests/test_box.py
@@ -1,30 +1,155 @@
 """
 Testing parmepy.domain.box.Box
 """
-from parmepy.constants import np, PERIODIC
+from parmepy.constants import PERIODIC, DEFAULT_TASK_ID
 from parmepy.domain.box import Box
+from numpy import allclose, ones_like, zeros_like
+import parmepy.tools.numpywrappers as npw
+from parmepy.mpi import main_size, main_rank
 
 
 def test_create_box1():
-    """Test default parameters"""
     dom = Box()
     assert dom.dimension == 3
-    assert np.allclose(dom.length, np.ones_like(dom.length))
-    assert np.allclose(dom.origin, np.zeros_like(dom.origin))
+    assert allclose(dom.length, ones_like(dom.length))
+    assert allclose(dom.origin, zeros_like(dom.origin))
     assert [b == PERIODIC for b in dom.boundaries]
+    cond = [dom.tasks_on_proc(i) == DEFAULT_TASK_ID for i in range(main_size)]
+    cond = npw.asboolarray(cond)
+    assert cond.all()
 
 
 def test_create_box2():
-    """Test given parameters"""
-    L = np.asarray([1., 2.5])
-    ori = np.asarray([1, 2.1])
-    dom = Box(2, length=L, origin=ori)
+    L = npw.asrealarray([1., 2.5])
+    ori = npw.asrealarray([1, 2.1])
+    dom = Box(length=L, origin=ori)
     assert dom.dimension == 2
-    assert np.allclose(dom.length, L)
-    assert np.allclose(dom.origin, ori)
+    assert allclose(dom.length, L)
+    assert allclose(dom.origin, ori)
     assert [b == PERIODIC for b in dom.boundaries]
-    
+    cond = [dom.tasks_on_proc(i) == DEFAULT_TASK_ID for i in range(main_size)]
+    cond = npw.asboolarray(cond)
+    assert cond.all()
+
+
+def test_create_box3():
+    L = [1, 2, 4.]
+    dom = Box(length=L)
+    assert dom.dimension == 3
+    assert allclose(dom.length, npw.asrealarray(L))
+    assert allclose(dom.origin, zeros_like(L))
+    assert [b == PERIODIC for b in dom.boundaries]
+    cond = [dom.tasks_on_proc(i) == DEFAULT_TASK_ID for i in range(main_size)]
+    cond = npw.asboolarray(cond)
+    assert cond.all()
+
+
+def test_create_box4():
+    L = [1, 2, 4.]
+    tasks = [CPU] * main_size
+    if main_size > 1:
+        tasks[-1] = GPU
+    dom = Box(length=L, proc_tasks=tasks)
+
+    last = main_size - 1
+    if main_size > 1:
+        if main_rank != last:
+            assert dom.currentTask() == CPU
+        else:
+            assert dom.currentTask() == GPU
+    else:
+        assert dom.currentTask() == CPU
+
+
+# Test topology creation ...
+N = 33
+from parmepy.tools.parameters import Discretization, MPI_params
+r3D = Discretization([N, N, 17])  # No ghosts
+r3DGh = Discretization([N, N, 17], [2, 2, 2])  # Ghosts
+
+CPU = 12
+GPU = 29
+proc_tasks = [CPU] * main_size
+if main_size > 1:
+    proc_tasks[-1] = GPU
+from parmepy.mpi import main_comm
+comm_s = main_comm.Split(color=proc_tasks[main_rank], key=main_rank)
+mpCPU = MPI_params(comm=comm_s, task_id=CPU)
+mpGPU = MPI_params(comm=comm_s, task_id=GPU)
+
+from parmepy.mpi.topology import Cartesian
+
+
+def test_topo_standard():
+    dom = Box()
+    topo = dom.create_topology(discretization=r3D)
+    assert len(dom.topologies) == 1
+    assert isinstance(topo, Cartesian)
+    assert topo is dom.topologies.values()[0]
+    topo2 = dom.create_topology(discretization=r3DGh)
+    assert len(dom.topologies) == 2
+    assert isinstance(topo2, Cartesian)
+    topo3 = dom.create_topology(discretization=r3DGh)
+    assert len(dom.topologies) == 2
+    assert topo3 is topo2
+
+
+def test_topo_multi_tasks():
+    dom = Box(proc_tasks=proc_tasks)
+    if dom.isOnTask(CPU):
+        topo = dom.create_topology(discretization=r3D)
+    elif dom.isOnTask(GPU):
+        topo = dom.create_topology(discretization=r3DGh, dim=2)
+    assert len(dom.topologies) == 1
+    assert isinstance(topo, Cartesian)
+    assert topo is dom.topologies.values()[0]
+    if dom.isOnTask(CPU):
+        assert not topo.hasGhosts()
+    elif dom.isOnTask(GPU):
+        assert topo.hasGhosts()
+
+
+def test_topo_plane():
+    # e.g. for advectionDir
+    dom = Box()
+    topo = dom.create_topology(discretization=r3D,
+                               cutdir=[False, True, False])
+    assert len(dom.topologies) == 1
+    assert isinstance(topo, Cartesian)
+    assert topo is dom.topologies.values()[0]
+    assert topo.dimension == 1
+    assert topo.shape[1] == main_size
+
+
+def test_topo_from_mesh():
+    # e.g. for fftw
+    dom = Box(proc_tasks=proc_tasks)
+    from parmepy.f2py import fftw2py
+    if dom.isOnTask(CPU):
+        localres, global_start = fftw2py.init_fftw_solver(
+            r3D.resolution, dom.length, comm=comm_s.py2f())
+        print localres, global_start
+        topo = dom.create_plane_topology_from_mesh(localres=localres,
+                                                   global_start=global_start,
+                                                   discretization=r3D)
+    elif dom.isOnTask(GPU):
+        topo = dom.create_topology(discretization=r3DGh, dim=2)
+    if dom.isOnTask(CPU):
+        assert (topo.mesh.resolution == localres).all()
+        assert (topo.mesh.start() == global_start).all()
+        assert topo.dimension == 1
+        assert (topo.shape == [1, 1, comm_s.Get_size()]).all()
+    elif dom.isOnTask(GPU):
+        assert topo.size == 1
+
+
 # This may be useful to run mpi tests
 if __name__ == "__main__":
     test_create_box1()
     test_create_box2()
+    test_create_box3()
+    test_create_box4()
+    test_topo_standard()
+    test_topo_multi_tasks()
+    test_topo_plane()
+    test_topo_from_mesh()
diff --git a/HySoP/hysop/domain/tests/test_obstacle.py b/HySoP/hysop/domain/tests/test_obstacle.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8d92a2f131fb13e2bea1cab4accc7f066311330
--- /dev/null
+++ b/HySoP/hysop/domain/tests/test_obstacle.py
@@ -0,0 +1,267 @@
+"""
+Testing parmepy.domain.obstacle.Obstacle
+"""
+import parmepy as pp
+from parmepy.fields.continuous import Field
+from parmepy.mpi.topology import Cartesian
+from parmepy.domain.obstacle.sphere import Sphere, HemiSphere
+from parmepy.domain.obstacle.disk import Disk, HalfDisk
+from parmepy.domain.obstacle.planes import HalfSpace, Plane, SubSpace,\
+    SubPlane, PlaneBoundaries
+from parmepy.domain.obstacle.controlBox import ControlBox
+import numpy as np
+from parmepy.constants import CHECK_F_CONT
+from parmepy.tools.parameters import Discretization
+
+nb = 129
+Lx = Ly = Lz = 2
+dom = pp.Box(dimension=3, length=[Lx, Ly, Lz], origin=[-1., -1., -1.])
+dom2D = pp.Box(dimension=2, length=[Lx, Ly], origin=[-1., -1.])
+resol3D = Discretization([nb, nb, nb])
+resol2D = Discretization([nb, nb])
+scal = Field(domain=dom)
+scal2D = Field(domain=dom2D)
+topo = Cartesian(dom, resol3D)
+topo2D = Cartesian(dom2D, resol2D)
+coords = topo.mesh.coords
+coords2D = topo2D.mesh.coords
+scald = scal.discretize(topo).data[0]
+scald2D = scal2D.discretize(topo2D).data[0]
+h3d = topo.mesh.space_step
+h2d = topo2D.mesh.space_step
+dvol = np.prod(h3d)
+ds = np.prod(h2d)
+import math
+pi = math.pi
+tol = 1e-6
+lengths = np.asarray([20 * h3d[0], 22 * h3d[1], 31 * h3d[2]])
+rlengths = lengths + h3d
+rlengths2d = lengths[:2] + h2d
+scald[:] = 1.
+scald2D[:] = 1.
+
+
+def testSphere():
+    scald[:] = 1.
+    rad = 0.3
+    sphere = Sphere(domain=dom, position=[0., 0., 0.],
+                    radius=rad, porousLayers=[0.13])
+
+    sphere.discretize(topo)
+    ind = sphere.ind[topo][0]
+    (ix, iy, iz) = topo.mesh.indices([-0.2, 0, 0.2])
+    assert ind[ix, iy, iz]
+    (ix, iy, iz) = topo.mesh.indices([0.5, 0.1, 0.2])
+    assert not ind[ix, iy, iz]
+
+
+def testHemiSphere():
+    scald[:] = 1.
+    rad = 0.3
+    sphere = HemiSphere(domain=dom, position=[0., 0., 0.],
+                        radius=rad, porousLayers=[0.13])
+
+    sphere.discretize(topo)
+    ind = sphere.ind[topo][0]
+    (ix, iy, iz) = topo.mesh.indices([-0.3, 0., 0.])
+    assert ind[ix, iy, iz]
+    (ix, iy, iz) = topo.mesh.indices([0.3, 0., 0.])
+    assert not ind[ix, iy, iz]
+    assert ind.flags.f_contiguous is CHECK_F_CONT
+
+
+def testDisk():
+    scald2D[:] = 1.
+    rad = 0.3
+    sphere = Disk(domain=dom2D, position=[0., 0.],
+                  radius=rad, porousLayers=[0.13])
+
+    sphere.discretize(topo2D)
+    ind = sphere.ind[topo2D][0]
+    (ix, iy) = topo2D.mesh.indices([-0.2, 0.])
+    assert ind[ix, iy]
+    assert ind.flags.f_contiguous is CHECK_F_CONT
+
+
+def testHalfDisk():
+    scald2D[:] = 1.
+    rad = 0.3
+    sphere = HalfDisk(domain=dom2D, position=[0., 0.],
+                      radius=rad, porousLayers=[0.13])
+
+    sphere.discretize(topo2D)
+    ind = sphere.ind[topo2D][0]
+    (ix, iy) = topo2D.mesh.indices([-0.2, 0.])
+    assert ind[ix, iy]
+    assert ind.flags.f_contiguous is CHECK_F_CONT
+
+
+def testHalfSpace2D():
+    hsp = HalfSpace(domain=dom2D, normal=[1, 1], point=[0., 0.])
+    hsp.discretize(topo2D)
+    ind = hsp.ind[topo2D][0]
+    (ix, iy) = topo2D.mesh.indices([-0.8, 0.5])
+    assert ind[ix, iy]
+    assert ind.flags.f_contiguous is CHECK_F_CONT
+
+
+def testHalfSpace3D():
+    hsp = HalfSpace(domain=dom, normal=[1, 1, 1], point=[0., 0., 0.])
+    hsp.discretize(topo)
+    ind = hsp.ind[topo][0]
+    (ix, iy, iz) = topo.mesh.indices([-0.8, 0.5, -0.5])
+    assert ind[ix, iy, iz]
+    assert ind.flags.f_contiguous is CHECK_F_CONT
+
+
+def testPlane2D():
+    plane = Plane(domain=dom2D, normal=[1, 1], point=[0., 0.])
+    plane.discretize(topo2D)
+    ind = plane.ind[topo2D][0]
+    (ix, iy) = topo2D.mesh.indices([-0.5, 0.5])
+    assert ind[ix, iy]
+    assert ind.flags.f_contiguous is CHECK_F_CONT
+
+
+def testPlane3D():
+    plane = Plane(domain=dom, normal=[1, 1, 1], point=[0., 0., 0.])
+    plane.discretize(topo)
+    ind = plane.ind[topo][0]
+    (ix, iy, iz) = topo.mesh.indices([-0.3, 0.5, -0.2])
+    assert ind[ix, iy, iz]
+    assert ind.flags.f_contiguous is CHECK_F_CONT
+
+
+def testSubSpace2D():
+    ssp = SubSpace(domain=dom2D, normal=[1, 0.], point=[0., 0.], lengths=lengths[:2])
+    ssp.discretize(topo2D)
+    ind = ssp.ind[topo2D][0]
+    (ix, iy) = topo2D.mesh.indices([-0.5, 0.2])
+    assert ind[ix, iy]
+
+
+def testSubSpace3D():
+    ssp = SubSpace(domain=dom, normal=[0, 1, 0], point=[0., 0., 0.], lengths=lengths)
+    ssp.discretize(topo)
+    ind = ssp.ind[topo][0]
+    (ix, iy, iz) = topo.mesh.indices([0.3, -0.1, 0.2])
+    assert ind[ix, iy, iz]
+    assert ind.flags.f_contiguous is CHECK_F_CONT
+
+
+def testSubPlane2D():
+    ssp = SubPlane(domain=dom2D, normal=[1, 0], point=[0., 0.], lengths=lengths[:2])
+    ssp.discretize(topo2D)
+    ind = ssp.ind[topo2D][0]
+    ll = np.sum(scald2D[ind]) * h2d[1]
+    rll = rlengths2d[1]
+    assert abs(ll - rll) < tol
+    assert ind.flags.f_contiguous is CHECK_F_CONT
+
+
+def testSubPlane3D():
+    ssp = SubPlane(domain=dom, normal=[0, 1, 0], point=[0., 0., 0.], lengths=lengths)
+    ssp.discretize(topo)
+    ind = ssp.ind[topo][0]
+    # surface element for a normal along y : dx * dz
+    surf = np.sum(scald[ind]) * h3d[0] * h3d[2]
+    rsurf = rlengths[0] * rlengths[2]
+    assert abs(surf - rsurf) < tol
+    assert ind.flags.f_contiguous is CHECK_F_CONT
+
+
+def testPlaneBC2D():
+    bc = PlaneBoundaries(domain=dom2D, normal_dir=1, thickness=0.2)
+    bc.discretize(topo2D)
+    ind = bc.ind[topo2D][0]
+    (ix, iy) = topo2D.mesh.indices([-0.5, -Ly * 0.5])
+    assert ind[ix, iy]
+    (ix, iy) = topo2D.mesh.indices([-0.5, Ly * 0.5 - 2 * h2d[1]])
+    assert ind[ix, iy]
+    assert ind.flags.f_contiguous is CHECK_F_CONT
+
+
+def testPlaneBC3D():
+    bc = PlaneBoundaries(domain=dom, normal_dir=1, thickness=0.2)
+    bc.discretize(topo)
+    ind = bc.ind[topo][0]
+    (ix, iy, iz) = topo.mesh.indices([-0.5, -Ly * 0.5, 0.3])
+    assert ind[ix, iy, iz]
+    (ix, iy, iz) = topo.mesh.indices([-0.5, Ly * 0.5 - 2 * h3d[1], 0.3])
+    assert ind[ix, iy, iz]
+    assert ind.flags.f_contiguous is CHECK_F_CONT
+
+
+def testControlBox2D():
+    lx = 10 * h2d[0]
+    ly = 22 * h2d[1]
+
+    cb = ControlBox(domain=dom2D, origin=[-0.5, -0.5], lengths=[lx, ly])
+    cb.discretize(topo2D)
+    surf = cb.integrate(scal2D, topo2D)
+    rsurf = lx * ly
+    assert abs(surf - rsurf) < tol
+    assert cb.ind[topo2D][0].flags.f_contiguous is CHECK_F_CONT
+
+
+def testControlBox3D():
+    # use a float array : an integer array would truncate the grid steps to 0
+    ll = np.zeros(3)
+    ll[0] = 10 * h3d[0]
+    ll[1] = 22 * h3d[1]
+    ll[2] = 51 * h3d[2]
+
+    cb = ControlBox(domain=dom, origin=[0.5, -0.5, -0.5], lengths=ll)
+    cb.discretize(topo)
+    vol = cb.integrate(scal, topo)
+    rvol = np.prod(ll)
+    assert abs(rvol - vol) < tol
+
+    vol = cb.integrate(scal, topo, useSlice=False)
+    assert abs(rvol - vol) < tol
+    ind = np.asarray([0, 1, 2])
+    for i in xrange(3):
+        surfUp = cb.integrateOnSurface(scal, topo, normalDir=i, up=True)
+        surfDown = cb.integrateOnSurface(scal, topo, normalDir=i, up=False)
+        j = np.where(ind != i)
+        sref = np.prod(ll[j])
+        assert abs(surfUp - sref) < tol
+        assert abs(surfDown - sref) < tol
+
+    assert cb.ind[topo][0].flags.f_contiguous is CHECK_F_CONT
+
+
+def testControlBoxSphere():
+    lx = 1.5
+    ly = 1.5
+    lz = 1.5
+    rad = 0.2
+    cb = ControlBox(domain=dom, origin=[-0.75, -0.75, -0.75], lengths=[lx, ly, lz])
+    layer = 2 * h3d[0]
+    sphere = Sphere(domain=dom, position=[0., 0., 0.],
+                    radius=rad, porousLayers=[layer])
+    cb.sub(sphere, topo)
+    ind = cb.indReduced[topo][0]
+    (ix, iy, iz) = topo.mesh.indices([0.1, 0.0, 0.])
+    assert not ind[ix, iy, iz]
+    (ix, iy, iz) = topo.mesh.indices([0.3, 0.0, 0.])
+    assert ind[ix, iy, iz]
+    assert ind.flags.f_contiguous is CHECK_F_CONT
+
+# This may be useful to run mpi tests
+#if __name__ == "__main__":
+    ## TODO : add tests for distributed obstacles.
+    ## testHemiSphere()
+    ## testDisk()
+    ## testHalfDisk()
+    ## testHalfSpace2D()
+    ## testHalfSpace3D()
+    ## testPlane2D()
+    ## testPlane3D()
+    ## testSubSpace2D()
+    ## testSubSpace3D()
+    ## testSubPlane2D()
+    ## testSubPlane3D()
+    ## testPlaneBC2D()
+    ## testPlaneBC3D()
+    ## testControlBox2D()
+    ## testControlBox3D()
+    ## testControlBoxSphere()
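
All obstacle tests above share one pattern: discretize the obstacle on a topology,
then probe the boolean index array it stores per topology. A condensed sketch of
that pattern (same imports and globals as this test module):

    def obstacle_contains(obst, topo, point):
        # discretize() fills obst.ind[topo] with one boolean array per layer
        obst.discretize(topo)
        ind = obst.ind[topo][0]
        # mesh.indices maps physical coordinates to grid indices
        return bool(ind[topo.mesh.indices(point)])

    # e.g. obstacle_contains(Sphere(domain=dom, position=[0., 0., 0.],
    #                               radius=0.3, porousLayers=[0.13]),
    #                        topo, [-0.2, 0., 0.2])   # -> True, as in testSphere
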
diff --git a/HySoP/hysop/fields/__init__.pyc b/HySoP/hysop/fields/__init__.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b5afb81a7e8ff7f8a074db9068516b64e126f384
Binary files /dev/null and b/HySoP/hysop/fields/__init__.pyc differ
diff --git a/HySoP/hysop/fields/continuous.py b/HySoP/hysop/fields/continuous.py
index 5885e34a0fbe8221edc53e9083bb8b5e4985f485..f36500e2959f6d2e2294c3d3d4805b92d6ab2f1c 100644
--- a/HySoP/hysop/fields/continuous.py
+++ b/HySoP/hysop/fields/continuous.py
@@ -6,6 +6,7 @@ Continuous variable description.
 from parmepy.constants import debug
 from parmepy.fields.discrete import DiscreteField
 from parmepy.mpi import main_rank
+from parmepy.tools.profiler import Profiler
 
 
 class Field(object):
@@ -45,9 +46,9 @@ class Field(object):
         by its domain of definition.
 
         @param domain : physical domain where this field is defined.
+        @param name : name of the variable. Required for h5py i/o.
         @param formula : a user-defined python function
         (default = 0 all over the domain)
-        @param name : name of the variable.
         @param isVector : true if the field is a vector.
         @param nbComponents : Components number (1 for scalar fields).
         @param doVectorize : true if formula must be vectorized
@@ -60,10 +61,9 @@ class Field(object):
         ## dimension (1, 2 or 3D), equal to domain dimension.
         self.dimension = self.domain.dimension
         ## Id (optional)
-        if(name is not None):
-            self.name = name
-        else:
-            self.name = 'unamed'
+        if name is None:
+            name = 'unnamed'
+        self.name = name
         ## Dictionary of all the discretizations of this field.
         ## Key = parmepy.mpi.topology.Cartesian,
         ## value = parmepy.fields.discrete.DiscreteField.
@@ -84,6 +84,13 @@ class Field(object):
         ## True if a formula is given and if it must be vectorized
         self.doVectorize = doVectorize
 
+        ## Time profiling
+        self.profiler = Profiler(self, self.domain.comm_task)
+
+    def get_profiling_info(self):
+        for d in self.discreteFields.values():
+            self.profiler += d.profiler
+
     @debug
     def discretize(self, topo):
         """
@@ -98,7 +105,7 @@ class Field(object):
         if topo in self.discreteFields.keys():
             return self.discreteFields[topo]
         else:
-            nameD = self.name + '_' + str(topo.getId())
+            nameD = self.name + '_' + str(topo.get_id())
             self.discreteFields[topo] = DiscreteField(
                 topo,
                 isVector=self.isVector,
@@ -120,24 +127,24 @@ class Field(object):
         self.doVectorize = doVectorize
 
     @debug
-    def initialize(self, currentTime=0., topo=None):
+    def initialize(self, time=0., topo=None):
         """
         Initialize one or all the discrete fields associated with
         this continuous field using the formula set during construction
         or with setFormula method.
         If formula is not set, field values are set to zero.
-        @param[in] currentTime current time
+        @param[in] time current time
         @param[in] topo a parmepy.mpi.Cartesian topology on which
-         the field must be initialized.
-         If topo is not set, all discrete fields will be initialized.
+        the field must be initialized.
+        If topo is not set, all discrete fields will be initialized.
         """
         if topo is None:
             for df in self.discreteFields.values():
-                df.initialize(self.formula, self.doVectorize, currentTime,
-                      *self.extraParameters)
+                df.initialize(self.formula, self.doVectorize, time,
+                              *self.extraParameters)
         else:
             df = self.discretization(topo)
-            df.initialize(self.formula, self.doVectorize, currentTime,
+            df.initialize(self.formula, self.doVectorize, time,
                           *self.extraParameters)
 
     def value(self, *pos):
@@ -166,7 +173,7 @@ class Field(object):
             s += 'vector field '
         else:
             s += 'scalar field '
-        s += 'with the following (local) discretisations :\n'
+        s += 'with the following (local) discretizations :\n'
         for f in self.discreteFields.values():
             s += f.__str__()
         return s
@@ -183,56 +190,84 @@ class Field(object):
             msg = 'This field has not been discretized on the given topology.'
             print (msg)
 
-    def norm(self, topo=None):
+    def norm(self, topo):
         """
         Compute the p-norm of the discretisation of the
         current field corresponding to topology topo (input arg).
         Default = norm-2.
         @param topo : the topology corresponding to the
-        required discretisation. If topo == None, returns
-        the norm of the first discretization.
+        required discretization.
         @return norm of the field.
+        Remark : topo may be None when the topology is not defined on
+        the current MPI task; in that case None is returned.
         """
-        if topo is not None:
-            return self.discreteFields[topo].norm()
-        else:
-            return self.discreteFields.values()[0].norm()
+        if topo is None:
+            return None
+        return self.discreteFields[topo].norm()
 
-    def normh(self, topo=None):
+    def normh(self, topo):
         """
         Compute a 'grid-norm' for the discrete field
         norm = ( hd * sum data[d](i,...)**p)**1/p for d = 1..dim
         and hd the grid step size in direction d.
         Sum on all grid points excluding ghosts.
         """
-        if topo is not None:
-            return self.discreteFields[topo].normh()
-        else:
-            return self.discreteFields.values()[0].normh()
+        if topo is None:
+            return None
+        return self.discreteFields[topo].normh()
 
     def setExtraParameters(self, *args):
         """
         Set values for (optional) list of extra parameters
         that may be required in formula.
-        @param params : a list of parameters to set.
+        @param args : a tuple of parameters to set.
         """
         self.extraParameters = args
 
-    def dump(self, filename, topo=None, mode=None):
+    def dump(self, filename, topo=None):
         """
         Dump (serialize) the data of the field into filename.
-        serialization process.
         @param filename : name of the file in which data are serialized
         @param topo : topology that identify a discrete field to be saved.
-        @param mode : set mode='append' to add data to an existing file.
-        if None, the first discrete field in the list will be saved.
         """
         if topo is not None:
             assert topo in self.discreteFields.keys()
-            self.discreteFields[topo].dump(filename, mode)
+            self.discreteFields[topo].dump(filename)
         else:
             # dump all discr or only the first one?
-            self.discreteFields.values()[0].dump(filename, mode)
+            self.discreteFields.values()[0].dump(filename)
+
+    def hdf_dump(self, discretization, io_params=None):
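+        """
+        Write a discretization of this field into an hdf5 file,
+        through a one-iteration HDF_Writer.
+        @param discretization : the discretization to be saved.
+        @param io_params : output setup (parmepy.tools.parameters.IO_params).
+        Default : an HDF5 file named after the field.
+        """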
+        from parmepy.operator.hdf_io import HDF_Writer
+        from parmepy.problem.simulation import Simulation
+        simu = Simulation(nbIter=1)
+        if io_params is None:
+            from parmepy.tools.parameters import IO_params
+            from parmepy.constants import HDF5
+            io_params = IO_params(self.name + '_', fileformat=HDF5)
+        wr = HDF_Writer(variables={self: discretization},
+                        io_params=io_params)
+        wr.discretize()
+        wr.setup()
+        wr.apply(simu)
+        wr.finalize()
+
+    def hdf_load(self, discretization, io_params=None, restart=None):
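+        """
+        Load values of this field from an hdf5 file, through an HDF_Reader.
+        @param discretization : the discretization to be filled.
+        @param io_params : input setup. Default : an HDF5 file named
+        after the field.
+        @param restart : optional restart index, forwarded to HDF_Reader.
+        """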
+        from parmepy.operator.hdf_io import HDF_Reader
+
+        if io_params is None:
+            from parmepy.tools.parameters import IO_params
+            from parmepy.constants import HDF5
+            io_params = IO_params(self.name + '_', fileformat=HDF5)
+        read = HDF_Reader(variables={self: discretization},
+                          io_params=io_params, restart=restart)
+        read.discretize()
+        read.setup()
+        print 'read field from ', read.io_params.filename
+        read.apply()
+        read.finalize()
+
+        return 0
 
     def load(self, filename, topo=None, fieldname=None):
         """
@@ -262,37 +297,5 @@ class Field(object):
                 dfield.zero()
 
     def finalize(self):
-        pass
-
-    def integrate(self, box, topo, useSlice=True,
-                  component=0, root=0, mpiall=True):
-        """
-        integrate the field on a control box, on the current processus
-        @param box : a parmepy.domain.obstacles.controlBox.ControlBox
-        @param topo : discretization used for integration
-        @param useSlice : true if integrate with slices else integrate with
-        boolean numpy array
-        @param component : component number of the field to integrate
-        @param root : root process for mpi reduce
-        @param mpiall : true for allreduce, else reduce on root
-        """
-        return self.discreteFields[topo].integrate(box, useSlice, component,
-                                                   root, mpiall)
-
-    def integrateOnSurface(self, surf, topo, useSlice=True, component=0,
-                           root=0, mpiall=True):
-        """
-        integrate the field on a surface, on the current processus
-        @param surf : a parmepy.domain.obstacles.controlBox.planes.SubPlane
-        @param topo : discretization used for integration
-        @param useSlice : true if integrate with slices else integrate with
-        boolean numpy array
-        @param component : component number of the field to integrate
-        @param root : root process for mpi reduce
-        @param mpiall : true for allreduce, else reduce on root
-        Warning : surf slices or cond must have been computed for integration
-        purpose, that is last point in each dir must not be included.
-        """
-        return self.discreteFields[topo].integrateOnSurface(surf, useSlice,
-                                                            component, root,
-                                                            mpiall)
+        for dfield in self.discreteFields.values():
+            dfield.finalize()
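
Taken together, the changes above give the following typical Field workflow
(a sketch; func is a hypothetical formula following the in-place convention
res, x, y, z, t -> res used throughout the tests):

    from parmepy.domain.box import Box
    from parmepy.fields.continuous import Field
    from parmepy.tools.parameters import Discretization

    def func(res, x, y, z, t):
        res[0][...] = x * y * z + t   # fill the buffer in place
        return res

    box = Box()
    topo = box.create_topology(discretization=Discretization([33, 33, 33]))
    f = Field(box, formula=func, name='myfield')
    fd = f.discretize(topo)
    f.initialize(time=0.5)   # keyword renamed from currentTime
    print f.norm(topo)       # norm of the discretization attached to topo
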
diff --git a/HySoP/hysop/fields/continuous.pyc b/HySoP/hysop/fields/continuous.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6ca72a9ee5f8b44803d7abc5191bb004b8cb89c7
Binary files /dev/null and b/HySoP/hysop/fields/continuous.pyc differ
diff --git a/HySoP/hysop/fields/discrete.py b/HySoP/hysop/fields/discrete.py
index a70d9f73369d1f19b3e9fb633b9bdcc4b3d6225a..e0edac882f14d3b2a38651520e4031b62d6c55a4 100644
--- a/HySoP/hysop/fields/discrete.py
+++ b/HySoP/hysop/fields/discrete.py
@@ -3,15 +3,15 @@
 
 Discrete fields (scalars or vectors) descriptions.
 """
-from parmepy.constants import debug, parmesPickle
+from parmepy.constants import debug
+import cPickle
 from itertools import count
 from parmepy.mpi import main_rank
-from parmepy.tools.timers import Timer, timed_function
+from parmepy.tools.profiler import profile, Profiler
 import numpy as np
 from parmepy.constants import ORDER, PARMES_REAL
 import parmepy.tools.numpywrappers as npw
 import numpy.linalg as la
-from parmepy.numerics.updateGhosts import UpdateGhosts
 
 
 class DiscreteField(object):
@@ -95,7 +95,7 @@ class DiscreteField(object):
         ## Scalar or vector
         self.isVector = isVector
         ## Object to store computational times
-        self.timer = Timer(self, ' (' + self.name + ')')
+        self.profiler = Profiler(self, self.domain.comm_task)
         ## Number of components of the field
         if nbComponents is None:
             self.nbComponents = self.dimension if self.isVector else 1
@@ -103,15 +103,7 @@ class DiscreteField(object):
             self.nbComponents = nbComponents
         ## The memory space for data ...
         self.data = [npw.zeros(self.resolution)
-                     for d in xrange(self.nbComponents)]
-
-        ## Ghost synchronization function
-        self.synchro_ghosts = self._empty_synchro_ghosts
-        self._the_gosts_synchro = None
-        if self.topology.hasGhosts:
-            self.synchro_ghosts = self._synchro_ghosts
-            self._the_ghosts_synchro = UpdateGhosts(self.topology,
-                                                    self.nbComponents)
+                     for _ in xrange(self.nbComponents)]
 
     def __getitem__(self, i):
         """ Access to the content of the field.
@@ -126,17 +118,10 @@ class DiscreteField(object):
         """
         self.data[i][...] = value
 
-    def _empty_synchro_ghosts(self):
-        pass
-
-    def _synchro_ghosts(self):
-        """Synchronize ghosts."""
-        self._the_ghosts_synchro(self.data)
-
     @debug
-    @timed_function
+    @profile
     def initialize(self, formula=None, doVectorize=False,
-                   currentTime=0., *args):
+                   time=0., *args):
         """
         Initialize values with a given formula.
 
@@ -146,12 +131,12 @@ class DiscreteField(object):
         @param doVectorize : true (default = false) if formula must
         be vectorized to handle numpy arrays. See notes about fields
         initialization in parmepy.fields.
-        @param currentTime : current time (default set to 0.0)
+        @param time : current time (default set to 0.0)
         @param args : extra (optional) parameters
         """
         if formula is not None:
             # Argument of formula. Usually : x, y, z, t, extras
-            arg_list = self.topology.mesh.coords + (currentTime,) + args
+            arg_list = self.topology.mesh.coords + (time,) + args
             if doVectorize:  # input formula is not defined for numpy arrays
                 if isinstance(formula, np.lib.function_base.vectorize):
                     v_formula = formula
@@ -226,28 +211,20 @@ class DiscreteField(object):
         return gResult ** 0.5
 
     @debug
-    @timed_function
-    def dump(self, filename, mode=None):
+    @profile
+    def dump(self, filename):
         """
         Dump (serialize) the data of the field into filename.
         @param filename : name of the file in which data are serialized
-        @param mode : set mode='append' to add data to an existing file
         """
         filename += '_rk_'
         filename += str(main_rank)
-        # create a new db
-        if mode is None:
-            db = parmesPickle(filename, mode='store')
-        elif mode is 'append':
-            # use an existing db
-            db = parmesPickle(filename, mode='load')
-
-        #for dim in xrange(self.dimension):
-        #    idd = self.name + '_' + str(dim)
-        db.dump(self.data, self.name)
+        # open the file and serialize all components at once
+        db = open(filename, 'wb')
+        cPickle.dump(self.data, db)
+        db.close()
 
     @debug
-    @timed_function
+    @profile
     def load(self, filename, fieldname=None):
         """
         load data from a file built with the dump method above.
@@ -258,87 +235,17 @@ class DiscreteField(object):
             fieldname = self.name
         filename += '_rk_'
         filename += str(main_rank)
-        db = parmesPickle(filename, mode='load')
+        db = open(filename, 'rb')
+        data = cPickle.load(db)
+        db.close()
         for dim in xrange(self.nbComponents):
-            self.data[dim] = db.load(fieldname)[0][dim]
+            self.data[dim] = data[dim]
 
     def zero(self):
         """ set all components to zero"""
         for dim in xrange(self.nbComponents):
             self.data[dim][...] = 0.0
 
-    def integrate_on_proc(self, box, useSlice=True, component=0):
-        """
-        integrate the field on a control box, on the current processus
-        @param box : a parmepy.domain.obstacles.controlBox.ControlBox
-        @param useSlice : true if integrate with slices else integrate with
-        boolean numpy array
-        @param component : component number of the field to integrate
-        """
-        if useSlice:
-            cond = box.slices[self.topology]
-        else:
-            iC = self.topology.mesh.iCompute
-            cond = box.ind[self.topology][0][iC]
-        dvol = npw.prod(self.topology.mesh.space_step)
-        result = npw.sum(self.data[component][cond])
-        result *= dvol
-        return result
-
-    def integrate(self, box, useSlice=True, component=0,
-                  root=0, mpiall=True):
-        """
-        integrate the field on a control box, on the current processus
-        @param box : a parmepy.domain.obstacles.controlBox.ControlBox
-        @param useSlice : true if integrate with slices else integrate with
-        boolean numpy array
-        @param component : component number of the field to integrate
-        @param root : root process for mpi reduce
-        @param mpiall : true for allreduce, else reduce on root
-        """
-        res = self.integrate_on_proc(box, useSlice, component)
-        if mpiall:
-            return self.topology.comm.allreduce(res)
-        else:
-            return self.topology.comm.reduce(res, root=root)
-
-    def integrateOnSurface(self, surf, useSlice=True, component=0,
-                           root=0, mpiall=True):
-        """
-        integrate the field on a surface, on the current processus
-        @param surf : a parmepy.domain.obstacles.controlBox.planes.SubPlane
-        @param useSlice : true if integrate with slices else integrate with
-        boolean numpy array
-        @param component : component number of the field to integrate
-        @param root : root process for mpi reduce
-        @param mpiall : true for allreduce, else reduce on root
-        Warning : surf slices or cond must have been computed for integration
-        purpose, that is last point in each dir must not be included.
-        """
-        res = self.integrateOnSurf_proc(surf, useSlice, component)
-        if mpiall:
-            return self.topology.comm.allreduce(res)
-        else:
-            return self.topology.comm.reduce(res, root=root)
+    def finalize(self):
+        pass
 
-    def integrateOnSurf_proc(self, surf, useSlice=True, component=0):
-        """
-        integrate the field on a surface, on the current processus
-        @param surf : a parmepy.domain.obstacles.controlBox.planes.Plane
-         or SubPlane
-        @param useSlice : true if integrate with slices else integrate with
-        boolean numpy array
-        @param component : component number of the field to integrate
-        Warning : surf slices or cond must have been computed for integration
-        purpose, that is last point in each dir must not be included.
-        """
-        if useSlice:
-            cond = surf.slices[self.topology]
-        else:
-            iC = self.topology.mesh.iCompute
-            cond = surf.ind[self.topology][0][iC]
-        dirs = np.where(surf.normal == 0)[0]
-        dS = npw.prod(self.topology.mesh.space_step[dirs])
-        result = npw.sum(self.data[component][cond])
-        result *= dS
-        return result
+    def get_profiling_info(self):
+        pass
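
With plain cPickle, dump/load is a simple per-process round trip; both methods
append '_rk_<rank>' to the file name, so each MPI process reads and writes its
own file. A sketch, reusing fd from the Field example above:

    fd.dump('scal_at_t0')    # rank 0 writes scal_at_t0_rk_0, and so on
    fd.zero()
    fd.load('scal_at_t0')    # restores every component from the pickle
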
diff --git a/HySoP/hysop/fields/discrete.pyc b/HySoP/hysop/fields/discrete.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c761b9e6ec9e59da0987d747b5face07eeef6a38
Binary files /dev/null and b/HySoP/hysop/fields/discrete.pyc differ
diff --git a/HySoP/hysop/fields/tests/test_field.py b/HySoP/hysop/fields/tests/test_field.py
index 1895fe28650b139a9d1b6592eb2e35c0cc722954..0fdf6ea41246adc4b7bd81af2770e4fe361ac39b 100644
--- a/HySoP/hysop/fields/tests/test_field.py
+++ b/HySoP/hysop/fields/tests/test_field.py
@@ -1,18 +1,24 @@
 """
 Testing parmepy.field.continuous.Field
 """
+import parmepy
+print parmepy.__file__
 from parmepy.fields.continuous import Field
 from parmepy.domain.box import Box
-from parmepy.mpi.topology import Cartesian
-from parmepy.domain.obstacle.planes import SubPlane
-import sys
+from parmepy.tools.parameters import Discretization
 import numpy as np
+from parmepy.fields.tests.func_for_tests import func_scal_1, func_scal_2, \
+    func_vec_1, func_vec_2, func_vec_3, func_vec_4, func_vec_5, func_vec_6
+from numpy import allclose
+
+d3D = Discretization([33, 33, 33])
+d2D = Discretization([33, 33])
+nbc = 4
 
 
 def test_continuous():
     """ Basic continuous field construction """
 
-    print (sys.path)
     dom = Box()
     cf = Field(dom)
     assert cf.isVector is False
@@ -52,8 +58,8 @@ def test_discretization():
     dom = Box()
     csf = Field(dom)
     cvf = Field(dom, isVector=True)
-    resolTopo = [33, 33, 17]
-    topo = Cartesian(dom, 3, resolTopo)
+    resolTopo = Discretization([33, 33, 17])
+    topo = dom.create_topology(resolTopo)
     csf.discretize(topo)
     cvf.discretize(topo)
     assert np.equal(csf.discreteFields[topo].resolution,
@@ -66,28 +72,286 @@ def test_discretization():
                     cvf.discreteFields[topo].data[2].shape).all()
 
 
-def test_integrate_onSurf():
-    dom = Box()
-    myf = Field(dom, isVector=True)
-    resolTopo = [33, 33, 17]
-    topo = Cartesian(dom, 3, resolTopo)
-    fdiscr = myf.discretize(topo)
-    fdiscr[0][...] = 1.0
-    normal = [1, 0, 0]
-    hh = topo.mesh.space_step
-    surf = SubPlane(domain=dom, normal=normal, point=dom.origin,
-                    lengths=dom.length - hh,
-                    epsilon=topo.mesh.space_step[0]/2.)
-
-    surf.discretize(topo)
-    res = myf.integrateOnSurface(surf, topo)
-    sref = dom.length[1] * dom.length[2]
-    assert(abs(res - sref) < 1e-6)
+# Non-Vectorized formula for a scalar
+def test_analytical_field_1():
+    box = Box()
+    topo = box.create_topology(discretization=d3D)
+    coords = topo.mesh.coords
+    caf = Field(box, formula=func_scal_1)
+    ref = Field(box)
+    refd = ref.discretize(topo)
+    cafd = caf.discretize(topo)
+    refd = ref.discretize(topo)
+    ids = id(cafd.data[0])
+    caf.initialize()
+    refd.data = func_scal_1(refd.data, *(coords + (0.,)))
+    assert allclose(cafd[0], refd.data[0])
+    assert id(cafd.data[0]) == ids
+    time = 3.0
+    caf.initialize(time=time)
+    refd.data = func_scal_1(refd.data, *(coords + (time,)))
+    assert allclose(cafd[0], refd.data[0])
+    assert id(cafd.data[0]) == ids
+
+
+# Vectorized formula
+def test_analytical_field_2():
+    box = Box()
+    topo = box.create_topology(d3D)
+    coords = topo.mesh.coords
+    caf = Field(box, formula=func_scal_2, doVectorize=True)
+    ref = Field(box)
+    cafd = caf.discretize(topo)
+    ids = id(cafd.data[0])
+    refd = ref.discretize(topo)
+    caf.initialize()
+    refd.data = func_scal_1(refd.data, *(coords + (0.,)))
+    assert allclose(cafd[0], refd.data[0])
+    assert id(cafd.data[0]) == ids
+    time = 3.0
+    caf.initialize(time=time)
+    refd.data = func_scal_1(refd.data, *(coords + (time,)))
+    assert allclose(cafd[0], refd.data[0])
+    assert id(cafd.data[0]) == ids
+
+
+# Non-Vectorized formula for a vector
+def test_analytical_field_3():
+    box = Box()
+    topo = box.create_topology(d3D)
+    coords = topo.mesh.coords
+    caf = Field(box, formula=func_vec_1, isVector=True)
+    ref = Field(box, isVector=True)
+    refd = ref.discretize(topo)
+    cafd = caf.discretize(topo)
+    refd = ref.discretize(topo)
+    ids = [0, ] * 3
+    for i in xrange(3):
+        ids[i] = id(cafd.data[i])
+    caf.initialize()
+    refd.data = func_vec_1(refd.data, *(coords + (0.,)))
+    for i in xrange(caf.nbComponents):
+        assert allclose(cafd[i], refd.data[i])
+        assert id(cafd.data[i]) == ids[i]
+    time = 3.0
+    caf.initialize(time=time)
+    refd.data = func_vec_1(refd.data, *(coords + (time,)))
+    for i in xrange(caf.nbComponents):
+        assert allclose(cafd[i], refd.data[i])
+        assert id(cafd.data[i]) == ids[i]
+
+
+# Vectorized formula for a vector
+def test_analytical_field_4():
+    box = Box()
+    topo = box.create_topology(d3D)
+    coords = topo.mesh.coords
+    caf = Field(box, formula=func_vec_2, isVector=True, doVectorize=True)
+    ref = Field(box, isVector=True)
+    refd = ref.discretize(topo)
+    cafd = caf.discretize(topo)
+    refd = ref.discretize(topo)
+    ids = [0, ] * 3
+    for i in xrange(3):
+        ids[i] = id(cafd.data[i])
+    caf.initialize()
+    refd.data = func_vec_1(refd.data, *(coords + (0.,)))
+    for i in xrange(caf.nbComponents):
+        assert allclose(cafd[i], refd.data[i])
+        assert id(cafd.data[i]) == ids[i]
+    time = 3.0
+    caf.initialize(time=time)
+    refd.data = func_vec_1(refd.data, *(coords + (time,)))
+    for i in xrange(caf.nbComponents):
+        assert allclose(cafd[i], refd.data[i])
+        assert id(cafd.data[i]) == ids[i]
+
+
+# Non-Vectorized formula for a vector, with extra-arguments
+def test_analytical_field_5():
+    box = Box()
+    topo = box.create_topology(d3D)
+    coords = topo.mesh.coords
+    caf = Field(box, formula=func_vec_3, isVector=True)
+    theta = 0.3
+    caf.setExtraParameters(theta)
+    ref = Field(box, isVector=True)
+    refd = ref.discretize(topo)
+    cafd = caf.discretize(topo)
+    refd = ref.discretize(topo)
+    ids = [0, ] * 3
+    for i in xrange(3):
+        ids[i] = id(cafd.data[i])
+    caf.initialize()
+    refd.data = func_vec_3(refd.data, *(coords + (0., theta)))
+    for i in xrange(caf.nbComponents):
+        assert allclose(cafd[i], refd.data[i])
+        assert id(cafd.data[i]) == ids[i]
+    time = 3.0
+    caf.initialize(time=time)
+    refd.data = func_vec_3(refd.data, *(coords + (time, theta)))
+    for i in xrange(caf.nbComponents):
+        assert allclose(cafd[i], refd.data[i])
+        assert id(cafd.data[i]) == ids[i]
+
+
+# Vectorized formula for a vector, with extra-arguments
+def test_analytical_field_6():
+    box = Box()
+    topo = box.create_topology(d3D)
+    coords = topo.mesh.coords
+    caf = Field(box, formula=func_vec_4, isVector=True, doVectorize=True)
+    theta = 0.3
+    caf.setExtraParameters(theta)
+    ref = Field(box, isVector=True)
+    refd = ref.discretize(topo)
+    cafd = caf.discretize(topo)
+    refd = ref.discretize(topo)
+    ids = [0, ] * 3
+    for i in xrange(3):
+        ids[i] = id(cafd.data[i])
+    caf.initialize()
+    refd.data = func_vec_3(refd.data, *(coords + (0., theta)))
+    for i in xrange(caf.nbComponents):
+        assert allclose(cafd[i], refd.data[i])
+        assert id(cafd.data[i]) == ids[i]
+    time = 3.0
+    caf.initialize(time=time)
+    refd.data = func_vec_3(refd.data, *(coords + (time, theta)))
+    for i in xrange(caf.nbComponents):
+        assert allclose(cafd[i], refd.data[i])
+        assert id(cafd.data[i]) == ids[i]
+
+
+# Non-Vectorized formula for a field with nbComponents
+# different from domain dim and  with extra-arguments
+def test_analytical_field_7():
+    box = Box()
+    topo = box.create_topology(d3D)
+    coords = topo.mesh.coords
+    caf = Field(box, formula=func_vec_5, nbComponents=nbc)
+    theta = 0.3
+    caf.setExtraParameters(theta)
+    ref = Field(box, nbComponents=nbc)
+    refd = ref.discretize(topo)
+    cafd = caf.discretize(topo)
+    refd = ref.discretize(topo)
+    ids = [0, ] * nbc
+    for i in xrange(nbc):
+        ids[i] = id(cafd.data[i])
+
+    caf.initialize()
+    refd.data = func_vec_5(refd.data, *(coords + (0., theta)))
+    for i in xrange(caf.nbComponents):
+        assert allclose(cafd[i], refd.data[i])
+        assert id(cafd.data[i]) == ids[i]
+    time = 3.0
+    caf.initialize(time=time)
+    refd.data = func_vec_5(refd.data, *(coords + (time, theta)))
+    for i in xrange(caf.nbComponents):
+        assert allclose(cafd[i], refd.data[i])
+        assert id(cafd.data[i]) == ids[i]
+
+
+# Non-Vectorized formula for a 2D field with nbComponents
+# different from domain dim and  with extra-arguments
+def test_analytical_field_8():
+    box = Box(dimension=2, length=[1., 1.], origin=[0., 0.])
+    topo = box.create_topology(d2D)
+    coords = topo.mesh.coords
+    caf = Field(box, formula=func_vec_6, nbComponents=nbc)
+    theta = 0.3
+    caf.setExtraParameters(theta)
+    ref = Field(box, nbComponents=nbc)
+    refd = ref.discretize(topo)
+    cafd = caf.discretize(topo)
+    refd = ref.discretize(topo)
+    ids = [0, ] * nbc
+    for i in xrange(nbc):
+        ids[i] = id(cafd.data[i])
+    caf.initialize()
+    refd.data = func_vec_6(refd.data, *(coords + (0., theta)))
+    for i in xrange(caf.nbComponents):
+        assert allclose(cafd[i], refd.data[i])
+        assert id(cafd.data[i]) == ids[i]
+    time = 3.0
+    caf.initialize(time=time)
+    refd.data = func_vec_6(refd.data, *(coords + (time, theta)))
+    for i in xrange(caf.nbComponents):
+        assert allclose(cafd[i], refd.data[i])
+        assert id(cafd.data[i]) == ids[i]
+
+
+# Non-Vectorized formula for a vector, initialization on several
+# topologies.
+def test_analytical_field_9():
+    box = Box()
+    topo = box.create_topology(d3D)
+    res2 = Discretization([65, 33, 65], [1, 1, 1])
+    topo2 = box.create_topology(res2, dim=2)
+    coords = topo.mesh.coords
+    coords2 = topo2.mesh.coords
+    caf = Field(box, formula=func_vec_1, isVector=True)
+    ref = Field(box, isVector=True)
+    refd = ref.discretize(topo)
+    cafd = caf.discretize(topo)
+    cafd2 = caf.discretize(topo2)
+    refd2 = ref.discretize(topo2)
+    ids = [0, ] * 3
+    for i in xrange(3):
+        ids[i] = id(cafd2.data[i])
+    # init on topo2
+    caf.initialize(topo=topo2)
+    refd2.data = func_vec_1(refd2.data, *(coords2 + (0.,)))
+    refd.data = func_vec_1(refd.data, *(coords + (0.,)))
+    for i in xrange(caf.nbComponents):
+        assert allclose(cafd2[i], refd2.data[i])
+        assert id(cafd2.data[i]) == ids[i]
+        assert not allclose(cafd[i], refd.data[i])
+    caf.initialize(topo=topo)
+    for i in xrange(caf.nbComponents):
+        assert allclose(cafd[i], refd.data[i])
+
+
+# Non-Vectorized formula for a vector, initialization on several
+# topologies.
+def test_analytical_field_10():
+    box = Box()
+    topo = box.create_topology(d3D)
+    res2 = Discretization([65, 33, 65], [1, 1, 1])
+    topo2 = box.create_topology(res2, dim=2)
+    coords = topo.mesh.coords
+    coords2 = topo2.mesh.coords
+    caf = Field(box, formula=func_vec_1, isVector=True)
+    ref = Field(box, isVector=True)
+    refd = ref.discretize(topo)
+    cafd = caf.discretize(topo)
+    cafd2 = caf.discretize(topo2)
+    refd2 = ref.discretize(topo2)
+    ids = [0, ] * 3
+    for i in xrange(3):
+        ids[i] = id(cafd2.data[i])
+    # init on all topos
+    caf.initialize()
+    refd2.data = func_vec_1(refd2.data, *(coords2 + (0.,)))
+    refd.data = func_vec_1(refd.data, *(coords + (0.,)))
+    for i in xrange(caf.nbComponents):
+        assert allclose(cafd2[i], refd2.data[i])
+        assert allclose(cafd[i], refd.data[i])
+        assert id(cafd2.data[i]) == ids[i]
+
 
 # This may be useful to run mpi tests
 if __name__ == "__main__":
     test_continuous()
     test_analytical()
     test_analytical_reset()
-    test_integrate_onSurf()
     test_discretization()
+    test_analytical_field_1()
+    test_analytical_field_2()
+    test_analytical_field_3()
+    test_analytical_field_4()
+    test_analytical_field_5()
+    test_analytical_field_6()
+    test_analytical_field_7()
+    test_analytical_field_8()
+    test_analytical_field_9()
+    test_analytical_field_10()
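
The reference functions imported from func_for_tests are not part of this patch.
They are assumed to follow the in-place convention visible above, which is also
why the id(...) assertions hold: the buffers are filled in place, never
reallocated. A plausible sketch of one of them:

    import numpy as np

    def func_vec_1(res, x, y, z, t):
        # fill each component in place so the buffer ids are preserved
        res[0][...] = np.sin(x) + t
        res[1][...] = np.cos(y)
        res[2][...] = x * z
        return res
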
diff --git a/HySoP/hysop/fields/variable_parameter.pyc b/HySoP/hysop/fields/variable_parameter.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ab56fcc359ff9c63a32c9b2768cc98c5eb24b820
Binary files /dev/null and b/HySoP/hysop/fields/variable_parameter.pyc differ
diff --git a/HySoP/hysop/gpu/QtRendering.py b/HySoP/hysop/gpu/QtRendering.py
index 4a3ccfdbc702a636be66c2305e35ce1d66882843..83ad1410773dea12d30656f43c9394417d459373 100644
--- a/HySoP/hysop/gpu/QtRendering.py
+++ b/HySoP/hysop/gpu/QtRendering.py
@@ -4,7 +4,6 @@
 Contains all stuff to perform real-time rendering on GPU.
 """
 from parmepy.constants import debug, np, PARMES_REAL
-from parmepy.operator.monitors.monitoring import Monitoring
 import sys
 from PyQt4 import QtGui, QtCore
 from PyQt4.QtOpenGL import QGLWidget
@@ -13,11 +12,11 @@ from parmepy.gpu.tools import get_opengl_shared_environment
 from parmepy.gpu import cl
 from parmepy.gpu.gpu_discrete import GPUDiscreteField
 from parmepy.gpu.gpu_kernel import KernelLauncher
-from parmepy.tools.timers import timed_function
 from parmepy.mpi import main_rank
+from parmepy.operator.computational import Computational
 
 
-class QtOpenGLRendering(Monitoring):
+class QtOpenGLRendering(Computational):
     """
     Monitor that performs the rendering.
 
@@ -50,7 +49,8 @@ class QtOpenGLRendering(Monitoring):
 
         Store a QApplication and a QMainWindow objects.
         """
-        Monitoring.__init__(self, [field], frequency=1, name="QtRendering")
+        super(QtOpenGLRendering, self).__init__([field],
+                                                frequency=1, name="QtRendering")
         if not field.dimension == 2:
             raise ValueError("Rendering implemented in 2D only.")
         ## Qt application
@@ -65,7 +65,7 @@ class QtOpenGLRendering(Monitoring):
         self.mtime = 0.
 
     @debug
-    def setUp(self):
+    def setup(self):
         """
         Create two VBOs buffers: GL_STATIC_DRAW and GL_COLOR_ARRAY.
         Create two OpenCL GLBuffers bound to VBOs.
@@ -116,7 +116,7 @@ class QtOpenGLRendering(Monitoring):
             self.window.widget.cl_env.ctx, cl.mem_flags.READ_WRITE,
             int(self.color_vbo))
         # Pass VBO and GLBuffers to the QGLWidget
-        self.window.widget.setUp(gl_buffers=[self.pos, self.color],
+        self.window.widget.setup(gl_buffers=[self.pos, self.color],
                                  color_vbo=self.color_vbo,
                                  pos_vbo=self.pos_vbo,
                                  partNumber=np.prod(
@@ -158,7 +158,6 @@ class QtOpenGLRendering(Monitoring):
         self.initCoordinates(self.pos, coord_min, mesh_size)
 
     @debug
-    @timed_function
     def apply(self, simulation):
         """
         Update the color GLBuffer and redraw the QGLWidget.
@@ -193,7 +192,6 @@ class QtOpenGLRendering(Monitoring):
         if self.numMethod.f_timer is not None:
             for f_timer in self.numMethod.f_timer:
                 self.timer.addFunctionTimer(f_timer)
-        Monitoring.finalize(self)
 
     @debug
     def startMainLoop(self):
@@ -277,7 +275,7 @@ class GLWidget(QGLWidget):
         self.partNumber = None
         self.width, self.height = 600, 600
 
-    def setUp(self, gl_buffers, color_vbo, pos_vbo, partNumber):
+    def setup(self, gl_buffers, color_vbo, pos_vbo, partNumber):
         """Set up VBOs and GLBuffers"""
         self.gl_objects = gl_buffers
         self.color_vbo, self.pos_vbo = color_vbo, pos_vbo
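
After these renamings the rendering monitor follows the same lifecycle as the
other Computational operators (compare HDF_Writer in fields/continuous.py above).
A sketch, assuming discretize() is inherited from the Computational base class:

    render = QtOpenGLRendering(field2d)   # field2d : a 2D parmepy Field
    render.discretize()                   # assumed inherited from Computational
    render.setup()                        # build the VBOs and CL buffers
    render.startMainLoop()                # hand control over to Qt
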
diff --git a/HySoP/hysop/gpu/cl_src/kernels/advection_and_remeshing_vector_2d.cl b/HySoP/hysop/gpu/cl_src/kernels/advection_and_remeshing_vector_2d.cl
new file mode 100644
index 0000000000000000000000000000000000000000..10a605498143598c3ee501bc516c30ea419514a7
--- /dev/null
+++ b/HySoP/hysop/gpu/cl_src/kernels/advection_and_remeshing_vector_2d.cl
@@ -0,0 +1,83 @@
+/**
+ * @file advection_and_remeshing_vector_2d.cl
+ * Advection and remeshing kernel for 2D vector advection.
+ */
+
+/**
+ * Performs advection and then remeshing of the particles' vector.
+ * A work-group handles a 1D problem, so gidY and gidZ are constant among the work-items of a work-group.
+ * Each work-item computes NB_I/WI_NB particle positions. To avoid concurrent writes in case of strong velocity gradients, each work-item processes contiguous particles.
+ * Particles are processed through OpenCL vector types of length 2, 4 or 8.
+ * Vector results are stored in local buffers as a cache and then copied to the global memory buffers.
+ *
+ * @param gvelo Velocity field
+ * @param pvec_X, pvec_Y Particle vector components
+ * @param gvec_X, gvec_Y Grid vector components
+ * @param dt Time step
+ * @param min_position Domain lower coordinate
+ * @param dx Space step
+ *
+ * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : number of points in each direction, from the first varying index to the last.
+ * @remark <code>WI_NB</code> is the number of work-items per work-group.
+ * @remark <code>__N__</code> is expanded at compilation time by vector width.
+ * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
+ * @see parmepy.gpu.tools.parse_file
+ */
+__kernel void advection_and_remeshing(__global const float* gvelo,
+				      __global const float* pvec_X,
+				      __global const float* pvec_Y,
+				      __global float* gvec_X,
+				      __global float* gvec_Y,
+                                      float dt, float min_position, float dx)
+{
+  uint gidX = get_global_id(0); /* OpenCL work-item global index (X) */
+  uint gidY = get_global_id(1); /* OpenCL work-item global index (Y) */
+  uint gidZ = get_global_id(2); /* OpenCL work-item global index (Z) */
+  float invdx = 1.0/dx;		/* Space step inverse */
+  uint i;			/* Particle index in 1D problem */
+  float__N__ p,			/* Particle position */
+    pv_X, pv_Y,			/* Particle vector */
+    v;				/* Particle velocity */
+  uint line_index = gidY*NB_I+ gidZ*NB_I*NB_II; /* Current 1D problem index */
+
+  __local float gvec_X_loc[NB_I]; /* Local buffer for result */
+  __local float gvec_Y_loc[NB_I]; /* Local buffer for result */
+  __local float gvelo_loc[NB_I]; /* Velocity cache */
+
+  for(i=gidX*__N__; i<NB_I; i+=(WI_NB*__N__))
+    {
+      /* Read velocity */
+      v = vload__N__((i+line_index)/__N__, gvelo);
+      /* Fill velocity cache */
+      gvelo_loc[noBC_id(i+__NN__)] = v.s__NN__;
+      /* Initialize result buffer */
+      gvec_X_loc[noBC_id(i+__NN__)] = 0.0;
+      gvec_Y_loc[noBC_id(i+__NN__)] = 0.0;
+    }
+
+  /* Synchronize work-group */
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  for(i=gidX*PART_NB_PER_WI; i<(gidX + 1)*PART_NB_PER_WI; i+=__N__)
+    {
+      /* Read Particle scalar */
+      pv_X = vload__N__((i + line_index)/__N__, pvec_X);
+      pv_Y = vload__N__((i + line_index)/__N__, pvec_Y);
+      /* Compute particle position */
+      p = advection(i, dt, dx, invdx, gvelo_loc);
+      /* Remesh particle */
+      remesh(i, dx, invdx, pv_X, pv_Y, p, gvec_X_loc, gvec_Y_loc);
+    }
+
+  /* Synchronize work-group */
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  for(i=gidX*__N__; i<NB_I; i+=(WI_NB*__N__))
+    {
+      /* Store result */
+      vstore__N__((float__N__)(gvec_X_loc[noBC_id(i+__NN__)],
+			       ), (i + line_index)/__N__, gvec_X);
+      vstore__N__((float__N__)(gvec_Y_loc[noBC_id(i+__NN__)],
+			       ), (i + line_index)/__N__, gvec_Y);
+    }
+}
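
The __N__ and __NN__ markers in these kernels are plain textual macros, expanded
before compilation by parmepy.gpu.tools.parse_file (referenced in the header
comment). A rough Python sketch of that expansion for vector width 4, illustrative
only; the real parser lives in parmepy.gpu.tools:

    def expand(src, n=4):
        out = []
        for line in src.splitlines():
            if '__NN__' in line:
                # one copy of the line per vector component
                out.extend(line.replace('__N__', str(n)).replace('__NN__', str(k))
                           for k in xrange(n))
            else:
                out.append(line.replace('__N__', str(n)))
        return '\n'.join(out)

    # 'gvelo_loc[noBC_id(i+__NN__)] = v.s__NN__;' expands to four lines,
    # gvelo_loc[noBC_id(i+0)] = v.s0; ... gvelo_loc[noBC_id(i+3)] = v.s3;
    # which is also why the vstore calls above end with a dangling comma:
    # each duplicated line contributes one component of the vector literal.
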
diff --git a/HySoP/hysop/gpu/cl_src/kernels/advection_and_remeshing_vector_3d.cl b/HySoP/hysop/gpu/cl_src/kernels/advection_and_remeshing_vector_3d.cl
new file mode 100644
index 0000000000000000000000000000000000000000..abbdbe006a226f4fcbcd01d97b33201653c54a36
--- /dev/null
+++ b/HySoP/hysop/gpu/cl_src/kernels/advection_and_remeshing_vector_3d.cl
@@ -0,0 +1,90 @@
+/**
+ * @file advection_and_remeshing_vector_3d.cl
+ * Advection and remeshing kernel for 3D vector advection.
+ */
+
+/**
+ * Performs advection and then remeshing of the particles' vector.
+ * A work-group handles a 1D problem, so gidY and gidZ are constant among the work-items of a work-group.
+ * Each work-item computes NB_I/WI_NB particle positions. To avoid concurrent writes in case of strong velocity gradients, each work-item processes contiguous particles.
+ * Particles are processed through OpenCL vector types of length 2, 4 or 8.
+ * Vector results are stored in local buffers as a cache and then copied to the global memory buffers.
+ *
+ * @param gvelo Velocity field
+ * @param pvec_X, pvec_Y, pvec_Z Particle vector components
+ * @param gvec_X, gvec_Y, gvec_Z Grid vector components
+ * @param dt Time step
+ * @param min_position Domain lower coordinate
+ * @param dx Space step
+ *
+ * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : number of points in each direction, from the first varying index to the last.
+ * @remark <code>WI_NB</code> is the number of work-items per work-group.
+ * @remark <code>__N__</code> is expanded at compilation time by vector width.
+ * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
+ * @see parmepy.gpu.tools.parse_file
+ */
+__kernel void advection_and_remeshing(__global const float* gvelo,
+				      __global const float* pvec_X,
+				      __global const float* pvec_Y,
+				      __global const float* pvec_Z,
+				      __global float* gvec_X,
+				      __global float* gvec_Y,
+				      __global float* gvec_Z,
+                                      float dt, float min_position, float dx)
+{
+  uint gidX = get_global_id(0); /* OpenCL work-item global index (X) */
+  uint gidY = get_global_id(1); /* OpenCL work-item global index (Y) */
+  uint gidZ = get_global_id(2); /* OpenCL work-item global index (Z) */
+  float invdx = 1.0/dx;		/* Space step inverse */
+  uint i;			/* Particle index in 1D problem */
+  float__N__ p,			/* Particle position */
+    pv_X, pv_Y, pv_Z,			/* Particle vector */
+    v;				/* Particle velocity */
+  uint line_index = gidY*NB_I+ gidZ*NB_I*NB_II; /* Current 1D problem index */
+
+  __local float gvec_X_loc[NB_I]; /* Local buffer for result */
+  __local float gvec_Y_loc[NB_I]; /* Local buffer for result */
+  __local float gvec_Z_loc[NB_I]; /* Local buffer for result */
+  __local float gvelo_loc[NB_I]; /* Velocity cache */
+
+  for(i=gidX*__N__; i<NB_I; i+=(WI_NB*__N__))
+    {
+      /* Read velocity */
+      v = vload__N__((i+line_index)/__N__, gvelo);
+      /* Fill velocity cache */
+      gvelo_loc[noBC_id(i+__NN__)] = v.s__NN__;
+      /* Initialize result buffer */
+      gvec_X_loc[noBC_id(i+__NN__)] = 0.0;
+      gvec_Y_loc[noBC_id(i+__NN__)] = 0.0;
+      gvec_Z_loc[noBC_id(i+__NN__)] = 0.0;
+    }
+
+  /* Synchronize work-group */
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  for(i=gidX*PART_NB_PER_WI; i<(gidX + 1)*PART_NB_PER_WI; i+=__N__)
+    {
+      /* Read Particle scalar */
+      pv_X = vload__N__((i + line_index)/__N__, pvec_X);
+      pv_Y = vload__N__((i + line_index)/__N__, pvec_Y);
+      pv_Z = vload__N__((i + line_index)/__N__, pvec_Z);
+      /* Compute particle position */
+      p = advection(i, dt, dx, invdx, gvelo_loc);
+      /* Remesh particle */
+      remesh(i, dx, invdx, pv_X, pv_Y, pv_Z, p, gvec_X_loc, gvec_Y_loc, gvec_Z_loc);
+    }
+
+  /* Synchronize work-group */
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  for(i=gidX*__N__; i<NB_I; i+=(WI_NB*__N__))
+    {
+      /* Store result */
+      vstore__N__((float__N__)(gvec_X_loc[noBC_id(i+__NN__)],
+			       ), (i + line_index)/__N__, gvec_X);
+      vstore__N__((float__N__)(gvec_Y_loc[noBC_id(i+__NN__)],
+			       ), (i + line_index)/__N__, gvec_Y);
+      vstore__N__((float__N__)(gvec_Z_loc[noBC_id(i+__NN__)],
+			       ), (i + line_index)/__N__, gvec_Z);
+    }
+}
diff --git a/HySoP/hysop/gpu/cl_src/kernels/remeshing_vector_2d.cl b/HySoP/hysop/gpu/cl_src/kernels/remeshing_vector_2d.cl
new file mode 100644
index 0000000000000000000000000000000000000000..84cb5d3605ee633f4eb51124c2fd9f88e44a62d3
--- /dev/null
+++ b/HySoP/hysop/gpu/cl_src/kernels/remeshing_vector_2d.cl
@@ -0,0 +1,76 @@
+/**
+ * @file remeshing_vector_2d.cl
+ * Remeshing kernel.
+ */
+
+/**
+ * Performs remeshing of the particles' vector in 2d.
+ * A work-group handles a 1D problem, so gidY and gidZ are constant among the work-items of a work-group.
+ * Each work-item computes <code>NB_I/WI_NB</code> particle positions. To avoid concurrent writes in case of strong velocity gradients, each work-item processes contiguous particles.
+ * Particles are processed through OpenCL vector types of length 2, 4 or 8.
+ * Vector results are stored in local buffers as a cache and then copied to the global memory buffers.
+ *
+ * @param ppos Particle position
+ * @param pvec_X, pvec_Y Particle vector components
+ * @param gvec_X, gvec_Y Grid vector components
+ * @param min_position Domain lower coordinate
+ * @param dx Space step
+ *
+ * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : number of points in each direction, from the first varying index to the last.
+ * @remark <code>WI_NB</code> is the number of work-items per work-group.
+ * @remark <code>__N__</code> is expanded at compilation time by vector width.
+ * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
+ * @see parmepy.gpu.tools.parse_file
+ */
+__kernel void remeshing_kernel(__global const float* ppos,
+			       __global const float* pvec_X,
+			       __global const float* pvec_Y,
+			       __global float* gvec_X,
+			       __global float* gvec_Y,
+			       float min_position, float dx)
+{
+  uint gidX = get_global_id(0); /* OpenCL work-item global index (X) */
+  uint gidY = get_global_id(1); /* OpenCL work-item global index (Y) */
+  uint gidZ = get_global_id(2); /* OpenCL work-item global index (Z) */
+  float invdx = 1.0/dx;         /* Space step inverse */
+  uint i;			/* Particle index in 1D problem */
+  float__N__ p,			/* Particle position */
+    v_X, v_Y;			/* Particle quantity */
+  uint line_index = gidY*NB_I+ gidZ*NB_I*NB_II; /* Current 1D problem index */
+
+  __local float gvec_X_loc[NB_I]; /* Local buffer for result */
+  __local float gvec_Y_loc[NB_I]; /* Local buffer for result */
+
+  for(i=gidX*__N__; i<NB_I; i+=(WI_NB*__N__))
+    {
+      /* Initialize result buffer */
+      gvec_X_loc[noBC_id(i+__NN__)] = 0.0;
+      gvec_Y_loc[noBC_id(i+__NN__)] = 0.0;
+    }
+
+  /* Synchronize work-group */
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  for(i=gidX*PART_NB_PER_WI; i<(gidX + 1)*PART_NB_PER_WI; i+=__N__)
+    {
+      /* Read particle position */
+      p = vload__N__((i + line_index)/__N__, ppos) - (float__N__)(min_position);
+      /* Read particle scalar */
+      v_X = vload__N__((i + line_index)/__N__, pvec_X);
+      v_Y = vload__N__((i + line_index)/__N__, pvec_Y);
+      /* Remesh particle */
+      remesh(i, dx, invdx, v_X, v_Y, p, gvec_X_loc, gvec_Y_loc);
+    }
+
+  /* Synchronize work-group */
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  for(i=gidX*__N__; i<NB_I; i+=(WI_NB*__N__))
+    {
+      /* Store result */
+      vstore__N__((float__N__)(gvec_X_loc[noBC_id(i+__NN__)],
+			       ),(i + line_index)/__N__, gvec_X);
+      vstore__N__((float__N__)(gvec_Y_loc[noBC_id(i+__NN__)],
+			       ),(i + line_index)/__N__, gvec_Y);
+  }
+}
diff --git a/HySoP/hysop/gpu/cl_src/kernels/remeshing_vector_3d.cl b/HySoP/hysop/gpu/cl_src/kernels/remeshing_vector_3d.cl
new file mode 100644
index 0000000000000000000000000000000000000000..0f66a22ab3691b2f465c8380d8b0a88c7dd67022
--- /dev/null
+++ b/HySoP/hysop/gpu/cl_src/kernels/remeshing_vector_3d.cl
@@ -0,0 +1,83 @@
+/**
+ * @file remeshing_vector_3d.cl
+ * Remeshing kernel.
+ */
+
+/**
+ * Performs remeshing of the particles' vector in 3d.
+ * A work-group handles a 1D problem, so gidY and gidZ are constant among the work-items of a work-group.
+ * Each work-item computes <code>NB_I/WI_NB</code> particle positions. To avoid concurrent writes in case of strong velocity gradients, each work-item processes contiguous particles.
+ * Particles are processed through OpenCL vector types of length 2, 4 or 8.
+ * Vector results are stored in local buffers as a cache and then copied to the global memory buffers.
+ *
+ * @param ppos Particle position
+ * @param pvec_X, pvec_Y, pvec_Z Particle vector components
+ * @param gvec_X, gvec_Y, gvec_Z Grid vector components
+ * @param min_position Domain lower coordinate
+ * @param dx Space step
+ *
+ * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : number of points in each direction, from the first varying index to the last.
+ * @remark <code>WI_NB</code> is the number of work-items per work-group.
+ * @remark <code>__N__</code> is expanded at compilation time by vector width.
+ * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
+ * @see parmepy.gpu.tools.parse_file
+ */
+__kernel void remeshing_kernel(__global const float* ppos,
+			       __global const float* pvec_X,
+			       __global const float* pvec_Y,
+			       __global const float* pvec_Z,
+			       __global float* gvec_X,
+			       __global float* gvec_Y,
+			       __global float* gvec_Z,
+			       float min_position, float dx)
+{
+  uint gidX = get_global_id(0); /* OpenCL work-item global index (X) */
+  uint gidY = get_global_id(1); /* OpenCL work-item global index (Y) */
+  uint gidZ = get_global_id(2); /* OpenCL work-item global index (Z) */
+  float invdx = 1.0/dx;         /* Space step inverse */
+  uint i;			/* Particle index in 1D problem */
+  float__N__ p,			/* Particle position */
+    v_X, v_Y, v_Z;			/* Particle quantity */
+  uint line_index = gidY*NB_I+ gidZ*NB_I*NB_II; /* Current 1D problem index */
+
+  __local float gvec_X_loc[NB_I]; /* Local buffer for result */
+  __local float gvec_Y_loc[NB_I]; /* Local buffer for result */
+  __local float gvec_Z_loc[NB_I]; /* Local buffer for result */
+
+  for(i=gidX*__N__; i<NB_I; i+=(WI_NB*__N__))
+    {
+      /* Initialize result buffer */
+      gvec_X_loc[noBC_id(i+__NN__)] = 0.0;
+      gvec_Y_loc[noBC_id(i+__NN__)] = 0.0;
+      gvec_Z_loc[noBC_id(i+__NN__)] = 0.0;
+    }
+
+  /* Synchronize work-group */
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  for(i=gidX*PART_NB_PER_WI; i<(gidX + 1)*PART_NB_PER_WI; i+=__N__)
+    {
+      /* Read particle position */
+      p = vload__N__((i + line_index)/__N__, ppos) - (float__N__)(min_position);
+      /* Read particle vector components */
+      v_X = vload__N__((i + line_index)/__N__, pvec_X);
+      v_Y = vload__N__((i + line_index)/__N__, pvec_Y);
+      v_Z = vload__N__((i + line_index)/__N__, pvec_Z);
+      /* Remesh particle */
+      remesh(i, dx, invdx, v_X, v_Y, v_Z, p, gvec_X_loc, gvec_Y_loc, gvec_Z_loc);
+    }
+
+  /* Synchronize work-group */
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  for(i=gidX*__N__; i<NB_I; i+=(WI_NB*__N__))
+    {
+      /* Store result */
+      vstore__N__((float__N__)(gvec_X_loc[noBC_id(i+__NN__)],
+			       ),(i + line_index)/__N__, gvec_X);
+      vstore__N__((float__N__)(gvec_Y_loc[noBC_id(i+__NN__)],
+			       ),(i + line_index)/__N__, gvec_Y);
+      vstore__N__((float__N__)(gvec_Z_loc[noBC_id(i+__NN__)],
+			       ),(i + line_index)/__N__, gvec_Z);
+    }
+}
diff --git a/HySoP/hysop/gpu/cl_src/remeshing/basic_noVec_vector_2d.cl b/HySoP/hysop/gpu/cl_src/remeshing/basic_noVec_vector_2d.cl
new file mode 100644
index 0000000000000000000000000000000000000000..abb672668533bcc6ba6bd534f3c683b8baf3d230
--- /dev/null
+++ b/HySoP/hysop/gpu/cl_src/remeshing/basic_noVec_vector_2d.cl
@@ -0,0 +1,111 @@
+/**
+ * @file basic_noVec_vector_2d.cl
+ * Remeshing function, scalar (non-vectorized) version for 2D vector remeshing.
+ */
+
+void remesh(uint i, float dx, float invdx,
+	    float v_X, float v_Y,
+	    float p,
+	    __local float* gvec_X_loc, __local float* gvec_Y_loc);
+
+
+/**
+ * Remesh particles in local buffer.
+ *
+ * The remeshing formula is selected at compile time.
+ * Uses the built-in OpenCL functions fma and mix. Computations are performed on scalar floats (non-vectorized version).
+ *
+ * @param i Particle index
+ * @param dx Space step
+ * @param invdx 1/dx
+ * @param v_X,v_Y Particle vector components
+ * @param p Particle position
+ * @param gvec_X_loc,gvec_Y_loc Local buffers for result
+ *
+ * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : number of grid points in each direction, from the first varying index to the last.
+ * @remark <code>FORMULA</code> : remeshing formula flag {<code>M4PRIME</code>, <code>M6PRIME</code>, <code>M8PRIME</code>, <code>L6STAR</code>}
+ * @remark <code>REMESH</code> is a function-like macro expanding to the proper remeshing formula (e.g. <code>REMESH(alpha)</code> -> <code>alpha_l2_1</code>)
+ * @see parmepy.gpu.tools.parse_file
+ * @see parmepy.gpu.cl_src.common
+ */
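+
+/* Illustration (a sketch, assuming FORMULA selects the Lambda 2,1 remesher):
+ * REMESH(alpha)(y) expands to alpha_l2_1(y), the weight of the leftmost
+ * stencil point; beta, gamma and delta then weight the next three grid
+ * points, each accumulation being followed by a local barrier. */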
+void remesh(uint i, float dx, float invdx,
+	    float v_X, float v_Y,
+	    float p,
+	    __local float* gvec_X_loc, __local float* gvec_Y_loc){
+  float y;			/* Normalized distance to nearest left grid point */
+  int ind;			/* Integer coordinate */
+  uint index;		/* Remeshing index */
+  float w;
+
+  ind = convert_int_rtn(p * invdx);
+  y = (p - convert_float(ind) * dx) * invdx;
+
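+  /* Leftmost grid point of the remeshing stencil; adding NB_I before the
+     modulo keeps the periodic wrap non-negative. */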
+  index = convert_uint((ind - REMESH_SHIFT + NB_I) % NB_I);
+
+  w = REMESH(alpha)(y);
+  gvec_X_loc[noBC_id(index)] += (w * v_X);
+  gvec_Y_loc[noBC_id(index)] += (w * v_Y);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w = REMESH(beta)(y);
+  gvec_X_loc[noBC_id(index)] += (w * v_X);
+  gvec_Y_loc[noBC_id(index)] += (w * v_Y);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w = REMESH(gamma)(y);
+  gvec_X_loc[noBC_id(index)] += (w * v_X);
+  gvec_Y_loc[noBC_id(index)] += (w * v_Y);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w = REMESH(delta)(y);
+  gvec_X_loc[noBC_id(index)] += (w * v_X);
+  gvec_Y_loc[noBC_id(index)] += (w * v_Y);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+#if REMESH_SHIFT > 1
+  index = (index + 1) % NB_I;
+  w = REMESH(eta)(y);
+  gvec_X_loc[noBC_id(index)] += (w * v_X);
+  gvec_Y_loc[noBC_id(index)] += (w * v_Y);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w = REMESH(zeta)(y);
+  gvec_X_loc[noBC_id(index)] += (w * v_X);
+  gvec_Y_loc[noBC_id(index)] += (w * v_Y);
+  barrier(CLK_LOCAL_MEM_FENCE);
+#endif
+
+#if REMESH_SHIFT > 2
+  index = (index + 1) % NB_I;
+  w = REMESH(theta)(y);
+  gvec_X_loc[noBC_id(index)] += (w * v_X);
+  gvec_Y_loc[noBC_id(index)] += (w * v_Y);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w = REMESH(iota)(y);
+  gvec_X_loc[noBC_id(index)] += (w * v_X);
+  gvec_Y_loc[noBC_id(index)] += (w * v_Y);
+  barrier(CLK_LOCAL_MEM_FENCE);
+#endif
+
+#if REMESH_SHIFT > 3
+  index = (index + 1) % NB_I;
+  w = REMESH(kappa)(y);
+  gvec_X_loc[noBC_id(index)] += (w * v_X);
+  gvec_Y_loc[noBC_id(index)] += (w * v_Y);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w = REMESH(mu)(y);
+  gvec_X_loc[noBC_id(index)] += (w * v_X);
+  gvec_Y_loc[noBC_id(index)] += (w * v_Y);
+  barrier(CLK_LOCAL_MEM_FENCE);
+#endif
+}
diff --git a/HySoP/hysop/gpu/cl_src/remeshing/basic_noVec_vector_3d.cl b/HySoP/hysop/gpu/cl_src/remeshing/basic_noVec_vector_3d.cl
new file mode 100644
index 0000000000000000000000000000000000000000..c912769d9fb96fbce7284933bc8a46df117599a7
--- /dev/null
+++ b/HySoP/hysop/gpu/cl_src/remeshing/basic_noVec_vector_3d.cl
@@ -0,0 +1,121 @@
+/**
+ * @file basic_noVec_vector_3d.cl
+ * Remeshing function, scalar (non-vectorized) version for 3D vector remeshing.
+ */
+
+void remesh(uint i, float dx, float invdx,
+	    float v_X, float v_Y, float v_Z,
+	    float p,
+	    __local float* gvec_X_loc, __local float* gvec_Y_loc, __local float* gvec_Z_loc);
+
+
+/**
+ * Remesh particles in local buffer.
+ *
+ * The remeshing formula is selected at compile time.
+ * Uses the built-in OpenCL functions fma and mix. Computations are performed on scalar floats (non-vectorized version).
+ *
+ * @param i Particle index
+ * @param dx Space step
+ * @param invdx 1/dx
+ * @param v_X,v_Y,v_Z Particle vector components
+ * @param p Particle position
+ * @param gvec_X_loc,gvec_Y_loc,gvec_Z_loc Local buffers for result
+ *
+ * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : number of grid points in each direction, from the first varying index to the last.
+ * @remark <code>FORMULA</code> : remeshing formula flag {<code>M4PRIME</code>, <code>M6PRIME</code>, <code>M8PRIME</code>, <code>L6STAR</code>}
+ * @remark <code>REMESH</code> is a function-like macro expanding to the proper remeshing formula (e.g. <code>REMESH(alpha)</code> -> <code>alpha_l2_1</code>)
+ * @see parmepy.gpu.tools.parse_file
+ * @see parmepy.gpu.cl_src.common
+ */
+void remesh(uint i, float dx, float invdx,
+	    float v_X, float v_Y, float v_Z,
+	    float p,
+	    __local float* gvec_X_loc, __local float* gvec_Y_loc, __local float* gvec_Z_loc){
+  float y;			/* Normalized distance to nearest left grid point */
+  int ind;			/* Integer coordinate */
+  uint index;		/* Remeshing index */
+  float w;
+
+  ind = convert_int_rtn(p * invdx);
+  y = (p - convert_float(ind) * dx) * invdx;
+
+  index = convert_uint((ind - REMESH_SHIFT + NB_I) % NB_I);
+
+  w = REMESH(alpha)(y);
+  gvec_X_loc[noBC_id(index)] += (w * v_X);
+  gvec_Y_loc[noBC_id(index)] += (w * v_Y);
+  gvec_Z_loc[noBC_id(index)] += (w * v_Z);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w = REMESH(beta)(y);
+  gvec_X_loc[noBC_id(index)] += (w * v_X);
+  gvec_Y_loc[noBC_id(index)] += (w * v_Y);
+  gvec_Z_loc[noBC_id(index)] += (w * v_Z);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w = REMESH(gamma)(y);
+  gvec_X_loc[noBC_id(index)] += (w * v_X);
+  gvec_Y_loc[noBC_id(index)] += (w * v_Y);
+  gvec_Z_loc[noBC_id(index)] += (w * v_Z);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w = REMESH(delta)(y);
+  gvec_X_loc[noBC_id(index)] += (w * v_X);
+  gvec_Y_loc[noBC_id(index)] += (w * v_Y);
+  gvec_Z_loc[noBC_id(index)] += (w * v_Z);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+#if REMESH_SHIFT > 1
+  index = (index + 1) % NB_I;
+  w = REMESH(eta)(y);
+  gvec_X_loc[noBC_id(index)] += (w * v_X);
+  gvec_Y_loc[noBC_id(index)] += (w * v_Y);
+  gvec_Z_loc[noBC_id(index)] += (w * v_Z);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w = REMESH(zeta)(y);
+  gvec_X_loc[noBC_id(index)] += (w * v_X);
+  gvec_Y_loc[noBC_id(index)] += (w * v_Y);
+  gvec_Z_loc[noBC_id(index)] += (w * v_Z);
+  barrier(CLK_LOCAL_MEM_FENCE);
+#endif
+
+#if REMESH_SHIFT > 2
+  index = (index + 1) % NB_I;
+  w = REMESH(theta)(y);
+  gvec_X_loc[noBC_id(index)] += (w * v_X);
+  gvec_Y_loc[noBC_id(index)] += (w * v_Y);
+  gvec_Z_loc[noBC_id(index)] += (w * v_Z);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w = REMESH(iota)(y);
+  gvec_X_loc[noBC_id(index)] += (w * v_X);
+  gvec_Y_loc[noBC_id(index)] += (w * v_Y);
+  gvec_Z_loc[noBC_id(index)] += (w * v_Z);
+  barrier(CLK_LOCAL_MEM_FENCE);
+#endif
+
+#if REMESH_SHIFT > 3
+  index = (index + 1) % NB_I;
+  w = REMESH(kappa)(y);
+  gvec_X_loc[noBC_id(index)] += (w * v_X);
+  gvec_Y_loc[noBC_id(index)] += (w * v_Y);
+  gvec_Z_loc[noBC_id(index)] += (w * v_Z);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w = REMESH(mu)(y);
+  gvec_X_loc[noBC_id(index)] += (w * v_X);
+  gvec_Y_loc[noBC_id(index)] += (w * v_Y);
+  gvec_Z_loc[noBC_id(index)] += (w * v_Z);
+  barrier(CLK_LOCAL_MEM_FENCE);
+#endif
+}
diff --git a/HySoP/hysop/gpu/cl_src/remeshing/basic_vector_2d.cl b/HySoP/hysop/gpu/cl_src/remeshing/basic_vector_2d.cl
new file mode 100644
index 0000000000000000000000000000000000000000..da8d9234bea16158cf5d215410ca0cec2cbc3fe1
--- /dev/null
+++ b/HySoP/hysop/gpu/cl_src/remeshing/basic_vector_2d.cl
@@ -0,0 +1,111 @@
+/**
+ * @file basic_vector_2d.cl
+ * Remeshing function, vectorized version for vector remeshing in 2D.
+ */
+
+void remesh(uint i, float dx, float invdx,
+	    float__N__ v_X, float__N__ v_Y,
+	    float__N__ p,
+	    __local float* gvec_X_loc, __local float* gvec_Y_loc);
+
+
+/**
+ * Remesh particles in local buffer.
+ *
+ * The remeshing formula is selected at compile time.
+ * Uses the built-in OpenCL functions fma and mix. Computations are performed through OpenCL vector types.
+ *
+ * @param i Particle index
+ * @param dx Space step
+ * @param invdx 1/dx
+ * @param v_X,v_Y Particle vector components
+ * @param p Particle position
+ * @param gvec_X_loc,gvec_Y_loc Local buffers for result
+ *
+ * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : number of grid points in each direction, from the first varying index to the last.
+ * @remark <code>FORMULA</code> : remeshing formula flag {<code>M4PRIME</code>, <code>M6PRIME</code>, <code>M8PRIME</code>, <code>L6STAR</code>}
+ * @remark <code>__N__</code> is expanded at compile time to the vector width.
+ * @remark <code>__NN__</code> is expanded at compile time to a sequence of integers, one per vector component.
+ * @remark <code>REMESH</code> is a function-like macro expanding to the proper remeshing formula (e.g. <code>REMESH(alpha)</code> -> <code>alpha_l2_1</code>)
+ * @see parmepy.gpu.tools.parse_file
+ * @see parmepy.gpu.cl_src.common
+ */
+void remesh(uint i, float dx, float invdx,
+	    float__N__ v_X, float__N__ v_Y,
+	    float__N__ p,
+	    __local float* gvec_X_loc,  __local float* gvec_Y_loc){
+  float__N__ y;			/* Normalized distance to nearest left grid point */
+  int__N__ ind;			/* Integer coordinate */
+  uint__N__ index;		/* Remeshing index */
+  float w__NN__;
+
+  ind = convert_int__N___rtn(p * invdx);
+  y = (p - convert_float__N__(ind) * dx) * invdx;
+
+  index = convert_uint__N__((ind - REMESH_SHIFT + NB_I) % NB_I);
+
+  w__NN__ = REMESH(alpha)(y.s__NN__);
+  gvec_X_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_X.s__NN__);
+  gvec_Y_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Y.s__NN__);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w__NN__ = REMESH(beta)(y.s__NN__);
+  gvec_X_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_X.s__NN__);
+  gvec_Y_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Y.s__NN__);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w__NN__ = REMESH(gamma)(y.s__NN__);
+  gvec_X_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_X.s__NN__);
+  gvec_Y_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Y.s__NN__);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w__NN__ = REMESH(delta)(y.s__NN__);
+  gvec_X_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_X.s__NN__);
+  gvec_Y_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Y.s__NN__);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+#if REMESH_SHIFT > 1
+  index = (index + 1) % NB_I;
+  w__NN__ = REMESH(eta)(y.s__NN__);
+  gvec_X_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_X.s__NN__);
+  gvec_Y_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Y.s__NN__);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w__NN__ = REMESH(zeta)(y.s__NN__);
+  gvec_X_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_X.s__NN__);
+  gvec_Y_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Y.s__NN__);
+  barrier(CLK_LOCAL_MEM_FENCE);
+#endif
+
+#if REMESH_SHIFT > 2
+  index = (index + 1) % NB_I;
+  w__NN__ = REMESH(theta)(y.s__NN__);
+  gvec_X_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_X.s__NN__);
+  gvec_Y_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Y.s__NN__);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w__NN__ = REMESH(iota)(y.s__NN__);
+  gvec_X_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_X.s__NN__);
+  gvec_Y_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Y.s__NN__);
+  barrier(CLK_LOCAL_MEM_FENCE);
+#endif
+
+#if REMESH_SHIFT > 3
+  index = (index + 1) % NB_I;
+  w__NN__ = REMESH(kappa)(y.s__NN__);
+  gvec_X_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_X.s__NN__);
+  gvec_Y_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Y.s__NN__);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w__NN__ = REMESH(mu)(y.s__NN__);
+  gvec_X_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_X.s__NN__);
+  gvec_Y_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Y.s__NN__);
+  barrier(CLK_LOCAL_MEM_FENCE);
+#endif
+}
diff --git a/HySoP/hysop/gpu/cl_src/remeshing/basic_vector_3d.cl b/HySoP/hysop/gpu/cl_src/remeshing/basic_vector_3d.cl
new file mode 100644
index 0000000000000000000000000000000000000000..ed3f4a397583984ef7b1b65a9ce6fa03b4e2e461
--- /dev/null
+++ b/HySoP/hysop/gpu/cl_src/remeshing/basic_vector_3d.cl
@@ -0,0 +1,121 @@
+/**
+ * @file basic_vector_3d.cl
+ * Remeshing function, vectorized version for vector remeshing in 3D.
+ */
+
+void remesh(uint i, float dx, float invdx,
+	    float__N__ v_X, float__N__ v_Y, float__N__ v_Z,
+	    float__N__ p,
+	    __local float* gvec_X_loc, __local float* gvec_Y_loc,  __local float* gvec_Z_loc);
+
+
+/**
+ * Remesh particles in local buffer.
+ *
+ * The remeshing formula is selected at compile time.
+ * Uses the built-in OpenCL functions fma and mix. Computations are performed through OpenCL vector types.
+ *
+ * @param i Particle index
+ * @param dx Space step
+ * @param invdx 1/dx
+ * @param v_X,v_Y,v_Z Particle vector components
+ * @param p Particle position
+ * @param gvec_X_loc,gvec_Y_loc,gvec_Z_loc Local buffers for result
+ *
+ * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : number of grid points in each direction, from the first varying index to the last.
+ * @remark <code>FORMULA</code> : remeshing formula flag {<code>M4PRIME</code>, <code>M6PRIME</code>, <code>M8PRIME</code>, <code>L6STAR</code>}
+ * @remark <code>__N__</code> is expanded at compile time to the vector width.
+ * @remark <code>__NN__</code> is expanded at compile time to a sequence of integers, one per vector component.
+ * @remark <code>REMESH</code> is a function-like macro expanding to the proper remeshing formula (e.g. <code>REMESH(alpha)</code> -> <code>alpha_l2_1</code>)
+ * @see parmepy.gpu.tools.parse_file
+ * @see parmepy.gpu.cl_src.common
+ */
+void remesh(uint i, float dx, float invdx,
+	    float__N__ v_X, float__N__ v_Y, float__N__ v_Z,
+	    float__N__ p,
+	    __local float* gvec_X_loc,  __local float* gvec_Y_loc, __local float* gvec_Z_loc){
+  float__N__ y;			/* Normalized distance to nearest left grid point */
+  int__N__ ind;			/* Integer coordinate */
+  uint__N__ index;		/* Remeshing index */
+  float w__NN__;
+
+  ind = convert_int__N___rtn(p * invdx);
+  y = (p - convert_float__N__(ind) * dx) * invdx;
+
+  index = convert_uint__N__((ind - REMESH_SHIFT + NB_I) % NB_I);
+
+  w__NN__ = REMESH(alpha)(y.s__NN__);
+  gvec_X_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_X.s__NN__);
+  gvec_Y_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Y.s__NN__);
+  gvec_Z_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Z.s__NN__);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w__NN__ = REMESH(beta)(y.s__NN__);
+  gvec_X_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_X.s__NN__);
+  gvec_Y_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Y.s__NN__);
+  gvec_Z_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Z.s__NN__);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w__NN__ = REMESH(gamma)(y.s__NN__);
+  gvec_X_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_X.s__NN__);
+  gvec_Y_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Y.s__NN__);
+  gvec_Z_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Z.s__NN__);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w__NN__ = REMESH(delta)(y.s__NN__);
+  gvec_X_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_X.s__NN__);
+  gvec_Y_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Y.s__NN__);
+  gvec_Z_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Z.s__NN__);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+#if REMESH_SHIFT > 1
+  index = (index + 1) % NB_I;
+  w__NN__ = REMESH(eta)(y.s__NN__);
+  gvec_X_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_X.s__NN__);
+  gvec_Y_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Y.s__NN__);
+  gvec_Z_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Z.s__NN__);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w__NN__ = REMESH(zeta)(y.s__NN__);
+  gvec_X_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_X.s__NN__);
+  gvec_Y_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Y.s__NN__);
+  gvec_Z_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Z.s__NN__);
+  barrier(CLK_LOCAL_MEM_FENCE);
+#endif
+
+#if REMESH_SHIFT > 2
+  index = (index + 1) % NB_I;
+  w__NN__ = REMESH(theta)(y.s__NN__);
+  gvec_X_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_X.s__NN__);
+  gvec_Y_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Y.s__NN__);
+  gvec_Z_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Z.s__NN__);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w__NN__ = REMESH(iota)(y.s__NN__);
+  gvec_X_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_X.s__NN__);
+  gvec_Y_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Y.s__NN__);
+  gvec_Z_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Z.s__NN__);
+  barrier(CLK_LOCAL_MEM_FENCE);
+#endif
+
+#if REMESH_SHIFT > 3
+  index = (index + 1) % NB_I;
+  w__NN__ = REMESH(kappa)(y.s__NN__);
+  gvec_X_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_X.s__NN__);
+  gvec_Y_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Y.s__NN__);
+  gvec_Z_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Z.s__NN__);
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w__NN__ = REMESH(mu)(y.s__NN__);
+  gvec_X_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_X.s__NN__);
+  gvec_Y_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Y.s__NN__);
+  gvec_Z_loc[noBC_id(index.s__NN__)] += (w__NN__ * v_Z.s__NN__);
+  barrier(CLK_LOCAL_MEM_FENCE);
+#endif
+}
diff --git a/HySoP/hysop/gpu/cl_src/remeshing/private_vector_2d.cl b/HySoP/hysop/gpu/cl_src/remeshing/private_vector_2d.cl
new file mode 100644
index 0000000000000000000000000000000000000000..fcb1e443c40e568e7ba9c665659f6936343509c1
--- /dev/null
+++ b/HySoP/hysop/gpu/cl_src/remeshing/private_vector_2d.cl
@@ -0,0 +1,112 @@
+/**
+ * @file private_vector_2d.cl
+ * Remeshing function, vectorized version using a private weight variable, for 2D vector remeshing.
+ */
+
+void remesh(uint i, float dx, float invdx,
+	    float__N__ v_X, float__N__ v_Y,
+	    float__N__ p,
+	    __local float* gvec_X_loc, __local float* gvec_Y_loc);
+
+
+/**
+ * Remesh particles in local buffer.
+ *
+ * The remeshing formula is selected at compile time.
+ * Uses the built-in OpenCL functions fma and mix. Computations are performed through OpenCL vector types.
+ * A private temporary variable holds the remeshing weights.
+ *
+ * @param i Particle index
+ * @param dx Space step
+ * @param invdx 1/dx
+ * @param v_X,v_Y Particle vector components
+ * @param p Particle position
+ * @param gvec_X_loc,gvec_Y_loc Local buffers for result
+ *
+ * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : number of grid points in each direction, from the first varying index to the last.
+ * @remark <code>__N__</code> is expanded at compile time to the vector width.
+ * @remark <code>__NN__</code> is expanded at compile time to a sequence of integers, one per vector component.
+ * @remark <code>FORMULA</code> : remeshing formula flag {<code>M4PRIME</code>, <code>M6PRIME</code>, <code>M8PRIME</code>, <code>L6STAR</code>}
+ * @remark <code>REMESH</code> is a function-like macro expanding to the proper remeshing formula (e.g. <code>REMESH(alpha)</code> -> <code>alpha_l2_1</code>)
+ * @see parmepy.gpu.tools.parse_file
+ * @see parmepy.gpu.cl_src.common
+ */
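+
+/* Design note: unlike basic_vector_2d.cl, where a scalar weight is computed
+ * for each component (w__NN__ = REMESH(...)(y.s__NN__)), each REMESH formula
+ * is evaluated here once on the whole vector y; the private vector w is then
+ * read component-wise during the accumulation. */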
+void remesh(uint i, float dx, float invdx,
+	    float__N__ v_X, float__N__ v_Y,
+	    float__N__ p,
+	    __local float* gvec_X_loc, __local float* gvec_Y_loc){
+  float__N__ y,			/* Normalized distance to nearest left grid point */
+    w;			        /* Temporary remeshing weights */
+  int__N__ ind;			/* Integer coordinate */
+  uint__N__ index;		/* Remeshing index */
+
+  ind = convert_int__N___rtn(p * invdx);
+  y = (p - convert_float__N__(ind) * dx) * invdx;
+
+  index = convert_uint__N__((ind - REMESH_SHIFT + NB_I) % NB_I);
+
+  w = REMESH(alpha)(y);
+  gvec_X_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_X.s__NN__;
+  gvec_Y_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Y.s__NN__;
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w = REMESH(beta)(y);
+  gvec_X_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_X.s__NN__;
+  gvec_Y_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Y.s__NN__;
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w = REMESH(gamma)(y);
+  gvec_X_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_X.s__NN__;
+  gvec_Y_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Y.s__NN__;
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w = REMESH(delta)(y);
+  gvec_X_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_X.s__NN__;
+  gvec_Y_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Y.s__NN__;
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+#if REMESH_SHIFT > 1
+  index = (index + 1) % NB_I;
+  w = REMESH(eta)(y);
+  gvec_X_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_X.s__NN__;
+  gvec_Y_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Y.s__NN__;
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w = REMESH(zeta)(y);
+  gvec_X_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_X.s__NN__;
+  gvec_Y_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Y.s__NN__;
+  barrier(CLK_LOCAL_MEM_FENCE);
+#endif
+
+#if REMESH_SHIFT > 2
+  index = (index + 1) % NB_I;
+  w = REMESH(theta)(y);
+  gvec_X_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_X.s__NN__;
+  gvec_Y_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Y.s__NN__;
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w = REMESH(iota)(y);
+  gvec_X_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_X.s__NN__;
+  gvec_Y_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Y.s__NN__;
+  barrier(CLK_LOCAL_MEM_FENCE);
+#endif
+
+#if REMESH_SHIFT > 3
+  index = (index + 1) % NB_I;
+  w = REMESH(kappa)(y);
+  gvec_X_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_X.s__NN__;
+  gvec_Y_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Y.s__NN__;
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w = REMESH(mu)(y);
+  gvec_X_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_X.s__NN__;
+  gvec_Y_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Y.s__NN__;
+  barrier(CLK_LOCAL_MEM_FENCE);
+#endif
+}
diff --git a/HySoP/hysop/gpu/cl_src/remeshing/private_vector_3d.cl b/HySoP/hysop/gpu/cl_src/remeshing/private_vector_3d.cl
new file mode 100644
index 0000000000000000000000000000000000000000..dabd8e5d8dc285696a5129933adf1da1b759f4b3
--- /dev/null
+++ b/HySoP/hysop/gpu/cl_src/remeshing/private_vector_3d.cl
@@ -0,0 +1,122 @@
+/**
+ * @file private_vector_3d.cl
+ * Remeshing function, vectorized version using a private weight variable, for 3D vector remeshing.
+ */
+
+void remesh(uint i, float dx, float invdx,
+	    float__N__ v_X, float__N__ v_Y, float__N__ v_Z,
+	    float__N__ p,
+	    __local float* gvec_X_loc, __local float* gvec_Y_loc, __local float* gvec_Z_loc);
+
+
+/**
+ * Remesh particles in local buffer.
+ *
+ * The remeshing formula is selected at compile time.
+ * Uses the built-in OpenCL functions fma and mix. Computations are performed through OpenCL vector types.
+ * A private temporary variable holds the remeshing weights.
+ *
+ * @param i Particle index
+ * @param dx Space step
+ * @param invdx 1/dx
+ * @param v_X,v_Y,v_Z Particle vector components
+ * @param p Particle position
+ * @param gvec_X_loc,gvec_Y_loc,gvec_Z_loc Local buffers for result
+ *
+ * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : number of grid points in each direction, from the first varying index to the last.
+ * @remark <code>__N__</code> is expanded at compile time to the vector width.
+ * @remark <code>__NN__</code> is expanded at compile time to a sequence of integers, one per vector component.
+ * @remark <code>FORMULA</code> : remeshing formula flag {<code>M4PRIME</code>, <code>M6PRIME</code>, <code>M8PRIME</code>, <code>L6STAR</code>}
+ * @remark <code>REMESH</code> is a function-like macro expanding to the proper remeshing formula (e.g. <code>REMESH(alpha)</code> -> <code>alpha_l2_1</code>)
+ * @see parmepy.gpu.tools.parse_file
+ * @see parmepy.gpu.cl_src.common
+ */
+void remesh(uint i, float dx, float invdx,
+	    float__N__ v_X, float__N__ v_Y, float__N__ v_Z,
+	    float__N__ p,
+	    __local float* gvec_X_loc, __local float* gvec_Y_loc, __local float* gvec_Z_loc){
+  float__N__ y,			/* Normalized distance to nearest left grid point */
+    w;			        /* Temporary remeshing weights */
+  int__N__ ind;			/* Integer coordinate */
+  uint__N__ index;		/* Remeshing index */
+
+  ind = convert_int__N___rtn(p * invdx);
+  y = (p - convert_float__N__(ind) * dx) * invdx;
+
+  index = convert_uint__N__((ind - REMESH_SHIFT + NB_I) % NB_I);
+
+  w = REMESH(alpha)(y);
+  gvec_X_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_X.s__NN__;
+  gvec_Y_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Y.s__NN__;
+  gvec_Z_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Z.s__NN__;
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w = REMESH(beta)(y);
+  gvec_X_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_X.s__NN__;
+  gvec_Y_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Y.s__NN__;
+  gvec_Z_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Z.s__NN__;
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w = REMESH(gamma)(y);
+  gvec_X_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_X.s__NN__;
+  gvec_Y_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Y.s__NN__;
+  gvec_Z_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Z.s__NN__;
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w = REMESH(delta)(y);
+  gvec_X_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_X.s__NN__;
+  gvec_Y_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Y.s__NN__;
+  gvec_Z_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Z.s__NN__;
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+#if REMESH_SHIFT > 1
+  index = (index + 1) % NB_I;
+  w = REMESH(eta)(y);
+  gvec_X_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_X.s__NN__;
+  gvec_Y_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Y.s__NN__;
+  gvec_Z_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Z.s__NN__;
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w = REMESH(zeta)(y);
+  gvec_X_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_X.s__NN__;
+  gvec_Y_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Y.s__NN__;
+  gvec_Z_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Z.s__NN__;
+  barrier(CLK_LOCAL_MEM_FENCE);
+#endif
+
+#if REMESH_SHIFT > 2
+  index = (index + 1) % NB_I;
+  w = REMESH(theta)(y);
+  gvec_X_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_X.s__NN__;
+  gvec_Y_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Y.s__NN__;
+  gvec_Z_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Z.s__NN__;
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w = REMESH(iota)(y);
+  gvec_X_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_X.s__NN__;
+  gvec_Y_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Y.s__NN__;
+  gvec_Z_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Z.s__NN__;
+  barrier(CLK_LOCAL_MEM_FENCE);
+#endif
+
+#if REMESH_SHIFT > 3
+  index = (index + 1) % NB_I;
+  w = REMESH(kappa)(y);
+  gvec_X_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_X.s__NN__;
+  gvec_Y_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Y.s__NN__;
+  gvec_Z_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Z.s__NN__;
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  index = (index + 1) % NB_I;
+  w = REMESH(mu)(y);
+  gvec_X_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_X.s__NN__;
+  gvec_Y_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Y.s__NN__;
+  gvec_Z_loc[noBC_id(index.s__NN__)] += w.s__NN__ * v_Z.s__NN__;
+  barrier(CLK_LOCAL_MEM_FENCE);
+#endif
+}
diff --git a/HySoP/hysop/gpu/gpu_discrete.py b/HySoP/hysop/gpu/gpu_discrete.py
index a75229a04b1f94096c77275af963c87233683e16..95ce7b7789706fa24af82f944edb7bd4357161c8 100644
--- a/HySoP/hysop/gpu/gpu_discrete.py
+++ b/HySoP/hysop/gpu/gpu_discrete.py
@@ -4,13 +4,13 @@
 Contains class for discrete fields on GPU.
 """
 from parmepy import __VERBOSE__
-from parmepy.constants import ORDER, np, debug, PARMES_REAL, PARMES_INTEGER, S_DIR
+from parmepy.constants import ORDER, np,\
+    debug, PARMES_REAL, PARMES_INTEGER, S_DIR
 from parmepy.fields.discrete import DiscreteField
-from parmepy.gpu import cl, clArray, CL_PROFILE
+from parmepy.gpu import cl, CL_PROFILE
 from parmepy.gpu.gpu_kernel import KernelLauncher, KernelListLauncher
 from parmepy.tools.numpywrappers import zeros
-from parmepy.mpi.main_var import main_rank, main_size
-from parmepy.tools.timers import ManualFunctionTimer
+from parmepy.tools.profiler import FProfiler
 
 fromLayoutMgrFunc_3D_seq = [
     lambda a, shape: a.reshape(shape, order=ORDER)[...],
@@ -63,8 +63,7 @@ class GPUDiscreteField(DiscreteField):
     Allocates OpenCL device memory for the field.
     """
     def __init__(self, cl_env, topology=None, isVector=False, name="?",
-                 precision=PARMES_REAL, layout=True,
-                 batch_nb=None, batch_d=0):
+                 precision=PARMES_REAL, layout=True, simple_layout=False):
         """
         Constructor.
         @param queue : OpenCL queue
@@ -74,10 +73,9 @@ class GPUDiscreteField(DiscreteField):
         @param name : Field name
         @param idFromParent : Index in the parent's discrete fields
         @param layout : Boolean indicating if components are arranged in memory
-        @param batch_nb : array of integer to specify how to subdivise the
-        whole data to work with on the device. Default : [1]*dim
-        @param batch_d : direction concerned by the batch_nb array.
         Defaut : all components are considered in the same way.
+        @param simple_layout : Boolean indicating whether the layout in the
+        Z direction is ZYX (simple) or ZXY.
         @see parmepy.fields.vector.VectorField.__init__
         """
         super(GPUDiscreteField, self).__init__(topology, isVector, name)
@@ -96,126 +94,62 @@ class GPUDiscreteField(DiscreteField):
         self.gpu_allocated = False
         ## OpenCL Events list modifying this field
         self.events = []
+
+        # Get the rank of this process in the field discretisation.
+        # By default all MPI processes are used; otherwise the user creates
+        # and provides his own topology.
+        if topology is None:
+            from parmepy.mpi.main_var import main_rank
+            self._rank = main_rank
+        else:
+            self._rank = topology.rank
+
         ## Data layout is direction dependant
         self.layout = layout
+        ## Layout for the Z direction
+        self.simple_layout = simple_layout
         ## Layout and shape managers
         if self.domain.dimension == 3:
-            if main_size == 1:
-                self._shapeFunc = shapeFunc_3D_seq
-                self._fromLayoutMgrFunc = fromLayoutMgrFunc_3D_seq
-                self._toLayoutMgrFunc = toLayoutMgrFunc_3D_seq
-            else:
+            if self.simple_layout:
                 self._shapeFunc = shapeFunc_3D
                 self._fromLayoutMgrFunc = fromLayoutMgrFunc_3D
                 self._toLayoutMgrFunc = toLayoutMgrFunc_3D
+            else:
+                self._shapeFunc = shapeFunc_3D_seq
+                self._fromLayoutMgrFunc = fromLayoutMgrFunc_3D_seq
+                self._toLayoutMgrFunc = toLayoutMgrFunc_3D_seq
         else:
             self._shapeFunc = shapeFunc_2D
             self._fromLayoutMgrFunc = fromLayoutMgrFunc_2D
             self._toLayoutMgrFunc = toLayoutMgrFunc_2D
 
-        ## Flag telling if the data are subdivised or not
-        self.isBatch = np.prod(batch_nb) > 1
-        ## Batch number that is in the device memory
-        self.batch_on_device = [-1] * self.nbComponents
-        ## Is the data are on device
-        self.batch_is_on_device = [False] * self.nbComponents
-        ## Shape of one batch
-        self.shape_gpu = [None] * self.domain.dimension
-        ## Batch number
-        self.batch_nb = [None] * self.domain.dimension
-        ## Shift in index going from one batch to an other
-        self.batch_shift = [None] * self.domain.dimension
-        ## List of slices to access all batches
-        self.batch_slices = [None] * self.domain.dimension
-        ## Default shape (no batch)
-        self.shape_gpu_default = None
-        ## Default slice (no batch)
-        self.batch_slices_default = None
-        if self.isBatch:
-            self.shape_gpu_default = np.asarray(
-                self.data[0].shape, dtype=PARMES_INTEGER) / batch_nb
-            self.batch_slices_default = tuple(
-                [slice(0, nb) for nb in self.shape_gpu_default])
-            self.batch_setup(batch_nb, batch_d)
-            self.allocate = self._allocate_withBatch
-        else:
-            self.allocate = self._allocate_noBatch
-        self.toHost_timer = ManualFunctionTimer(
-            name="OpenCL_Transfer_toHost")
-        self.toDevice_timer = ManualFunctionTimer(
-            name="OpenCL_Transfer_toDevice")
-
-    def batch_setup(self, batch_nb=None, batch_d=0):
-        """Batch configuration.
-        @param batch_nb : array of integer to specify how to subdivise the
-        whole data to work with on the device. Default : [1]*dim
-        @param batch_d : direction concerned by the batch_nb array.
-        Defaut : all components are considered in the same way.
-        """
-        # Batch configuration
-        if batch_nb is None:
-            batch_nb = np.ones((self.domain.dimension, ),
-                               dtype=PARMES_INTEGER)
-        self.batch_nb[batch_d] = batch_nb
-        shape_host = np.asarray(self.data[0].shape, dtype=PARMES_INTEGER)
-        shape_host -= 2 * self.topology.ghosts
-        self.shape_gpu[batch_d] = shape_host / batch_nb
-        self.batch_shift[batch_d] = np.zeros((self.domain.dimension, ),
-                                             dtype=PARMES_INTEGER)
-        self.batch_shift[batch_d][batch_nb > 1] = \
-            self.shape_gpu[batch_d][batch_nb > 1]
-        self.batch_slices[batch_d] = []
-        gh_batch_d = self.topology.ghosts[batch_d]
-        for i in xrange(np.prod(batch_nb)):
-            s = [slice(i*bs, nb+i*bs+2*gh_batch_d) for nb, bs
-                 in zip(self.shape_gpu[batch_d], self.batch_shift[batch_d])]
-            self.batch_slices[batch_d].append(tuple(s))
-        self.shape_gpu[batch_d] += 2 * self.topology.ghosts
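+        # Profilers accumulating OpenCL transfer times
+        # (updated in toDevice/toHost when CL_PROFILE is enabled).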
+        self.profiler += FProfiler("Transfer_toHost")
+        self.profiler += FProfiler("Transfer_toDevice")
+        ## Transfer size counter in GiB (to device)
+        self.to_dev_size = 0.
+        ## Transfer size counter in GiB (to host)
+        self.to_host_size = 0.
 
-    def _allocate_noBatch(self):
+        ## Temporary cpu buffer to change data layout between cpu and gpu
+        self.host_data_pinned = [None, ] * self.nbComponents
+        
+    def allocate(self):
         """Device memory allocations no batch."""
         if not self.gpu_allocated:
-            self._tmp = zeros(np.prod(self.data[0].shape),
-                              dtype=self.precision)
+            evt = [None, ] * self.nbComponents
             for d in xrange(self.nbComponents):
                 self.data[d] = np.asarray(self.data[d],
                                           dtype=self.precision, order=ORDER)
-                self.gpu_data[d] = clArray.empty(
-                    self.cl_env.queue, self.data[d].shape, self.precision,
-                    order=ORDER, allocator=self.cl_env.memPool)
-                self.mem_size += self.gpu_data[d].nbytes
-                self.cl_env.available_mem -= self.gpu_data[d].nbytes
-            self.gpu_allocated = True
-            if __VERBOSE__:
-                print self.name, self.mem_size, "Bytes (",
-                print self.mem_size / (1024 ** 2), "MB)"
-
-    def _allocate_withBatch(self):
-        """Device memory allocations with batch."""
-        if not self.gpu_allocated:
-            max_gpu_shape = self.shape_gpu[0]
-            for s in self.shape_gpu[1:]:
-                if np.prod(max_gpu_shape) < np.prod(s):
-                    max_gpu_shape = s
-            self._tmp = zeros(np.prod(max_gpu_shape),
-                              dtype=self.precision)
+                self.gpu_data[d] = self.cl_env.global_allocation(self.data[d])
+                self.mem_size += self.gpu_data[d].size
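+                # Map the device buffer into host memory once for all:
+                # host_data_pinned is then used as a staging area for
+                # layout changes and host<->device copies.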
+                self.host_data_pinned[d], evt[d] = cl.enqueue_map_buffer(
+                    self.cl_env.queue,
+                    self.gpu_data[d],
+                    offset=0, shape=(int(np.prod(self.data[0].shape)), ),
+                    flags=cl.map_flags.READ | cl.map_flags.WRITE,
+                    dtype=PARMES_REAL, is_blocking=False, order=ORDER)
             for d in xrange(self.nbComponents):
-                self.data[d] = np.asarray(self.data[d],
-                                          dtype=self.precision, order=ORDER)
-                # If layout, each component of this vector are used only in
-                # directions of component. So it may have different sizes along
-                # components. Otherwise, each components (if any) are used
-                # in all directions, thus allocating a buffer of max size.
-                if self.layout:
-                    shape = self.shape_gpu[d]
-                else:
-                    shape = max_gpu_shape
-                if shape is not None:
-                    self.gpu_data[d] = clArray.empty(
-                        self.cl_env.queue, tuple(shape), self.precision,
-                        order=ORDER, allocator=self.cl_env.memPool)
-                    self.mem_size += self.gpu_data[d].nbytes
-                    self.cl_env.available_mem -= self.gpu_data[d].nbytes
+                evt[d].wait()
             self.gpu_allocated = True
             if __VERBOSE__:
                 print self.name, self.mem_size, "Bytes (",
@@ -223,7 +157,7 @@ class GPUDiscreteField(DiscreteField):
 
     @classmethod
     def fromField(cls, cl_env, vfield, precision=PARMES_REAL,
-                  layout=True, batch_nb=None, batch_d=0):
+                  layout=True, simple_layout=False):
         """
         Contructor from a discrete vector field.
         Mutates the given VectorField to a GPUVectorField.
@@ -232,19 +166,15 @@ class GPUDiscreteField(DiscreteField):
         @param vfield : VectorField
         @param precision : Floating point precision
         @param layout : Boolean indicating if components are arranged in memory
-        @param batch_nb : array of integer to specify how to subdivise the
-        @param batch_d : direction concerned by the batch_nb array.
+        @param simple_layout : Boolean indicating whether the layout in the
+        Z direction is ZYX (simple) or ZXY.
         """
         if not isinstance(vfield, GPUDiscreteField):
             vfield.__class__ = cls
             GPUDiscreteField.__init__(
                 vfield, cl_env,
                 vfield.topology, vfield.isVector, vfield.name,
-                precision, layout,
-                batch_nb=batch_nb, batch_d=batch_d)
-        else:
-            if vfield.isBatch:
-                vfield.batch_setup(batch_nb, batch_d)
+                precision, layout, simple_layout)
 
     def setInitializationKernel(self, kernel):
         """
@@ -254,13 +184,13 @@ class GPUDiscreteField(DiscreteField):
         self.init_kernel = kernel
 
     @debug
-    def dump(self, filename, mode=None):
+    def dump(self, filename):
         """
         @remark Synchronized OpenCL calls (waiting for event(s) completion)
         """
         self.toHost()
         self.wait()
-        DiscreteField.dump(self, filename, mode)
+        DiscreteField.dump(self, filename)
 
     @debug
     def load(self, filename, fieldname=None):
@@ -271,7 +201,7 @@ class GPUDiscreteField(DiscreteField):
         self.toDevice()
 
     @debug
-    def initialize(self, formula=None, doVectorize=False, currentTime=0.,
+    def initialize(self, formula=None, doVectorize=False, time=0.,
                    *args):
         """
         GPU data initialization.
@@ -283,48 +213,44 @@ class GPUDiscreteField(DiscreteField):
         @param args : formula extra parameters
         @remark Synchronized OpenCL calls (waiting for event(s) completion)
         """
-        t = self.precision(currentTime)
+        t = self.precision(time)
         if __VERBOSE__:
-            print "{"+str(main_rank)+"}", "Initialize", self.name
+            print "{" + str(self._rank) + "}", "Initialize", self.name
         isGPUKernel = isinstance(formula, KernelLauncher) \
             or isinstance(formula, KernelListLauncher)
         if not isGPUKernel and self.init_kernel is None:
-            DiscreteField.initialize(self, formula, False, currentTime, *args)
+            DiscreteField.initialize(self, formula, False, time, *args)
             for d in xrange(self.nbComponents):
                 self.data[d] = np.asarray(
                     self.data[d],
                     dtype=self.precision, order=ORDER)
             self.toDevice()
         else:
-            if self.isBatch:
-                assert np.prod([np.prod(b) for b in self.batch_nb]) == 1,\
-                    "Initilization with kernel is impossible with batch. \
-                Use Python formula instead."
             if isGPUKernel:
                 self.init_kernel = formula
             coord_min = np.ones(4, dtype=self.precision)
             mesh_size = np.ones(4, dtype=self.precision)
-            coord_min[0:self.dimension] = np.asarray(
+            coord_min[:self.dimension] = np.asarray(
                 self.topology.mesh.origin,
                 dtype=self.precision)
-            mesh_size[0:self.dimension] = np.asarray(
+            mesh_size[:self.dimension] = np.asarray(
                 self.topology.mesh.space_step,
                 dtype=self.precision)
             if self.nbComponents == 2:
-                evt = self.init_kernel(self.gpu_data[0].data,
-                                       self.gpu_data[1].data,
+                evt = self.init_kernel(self.gpu_data[0],
+                                       self.gpu_data[1],
                                        coord_min, mesh_size, t,
                                        *args,
                                        wait_for=self.events)
             elif self.nbComponents == 3:
-                evt = self.init_kernel(self.gpu_data[0].data,
-                                       self.gpu_data[1].data,
-                                       self.gpu_data[2].data,
+                evt = self.init_kernel(self.gpu_data[0],
+                                       self.gpu_data[1],
+                                       self.gpu_data[2],
                                        coord_min, mesh_size, t,
                                        *args,
                                        wait_for=self.events)
             else:
-                evt = self.init_kernel(self.gpu_data[0].data,
+                evt = self.init_kernel(self.gpu_data[0],
                                        coord_min, mesh_size, t,
                                        *args,
                                        wait_for=self.events)
@@ -337,14 +263,16 @@ class GPUDiscreteField(DiscreteField):
                 print " (" + str(self.mem_size / (1024. ** 2)) + " MBytes)"
             self.wait()
             for d in xrange(self.nbComponents):
-                self.gpu_data[d].data.release()
-                self.cl_env.available_mem += self.gpu_data[d].nbytes
-            if self.init_kernel is not None:
-                for f_timer in self.init_kernel.f_timer:
-                    self.timer.addFunctionTimer(f_timer)
+                self.host_data_pinned[d].base.release(self.cl_env.queue)
+                self.cl_env.global_deallocation(self.gpu_data[d])
             self._isReleased = True
 
-    def toDevice(self, component=None, layoutDir=None, batch_d=None):
+    def get_profiling_info(self):
+        if self.init_kernel is not None:
+            for p in self.init_kernel.profile:
+                self.profiler += p
+
+    def toDevice(self, component=None, layoutDir=None):
         """
         Host to device method.
         @param component : Component to consider (Default : all components)
@@ -372,83 +300,37 @@ class GPUDiscreteField(DiscreteField):
             range_components = [component]
             evt = [None]
 
-        #Ghosts Synchronization before sending
-        self.synchro_ghosts()
-
-        self.wait()
         mem_transfered = 0
         for d_id, d in enumerate(range_components):
-            if d_id > 0 and evt[d-1] is not None:
-                evt[d-1].wait()
             if self.layout:
                 layoutDir = d
-            if not self.isBatch:
-                if layoutDir is None:
-                    layoutDir = 0
-                if __VERBOSE__:
-                    print "{"+str(main_rank)+"}", "host->device :", \
-                        self.name, S_DIR[d], layoutDir, 'No Batch'
-                self._tmp[...] = self._toLayoutMgrFunc[layoutDir](self.data[d])
-                evt[d_id] = cl.enqueue_copy(self.cl_env.queue,
-                                            self.gpu_data[d].data, self._tmp)
-                mem_transfered += self.gpu_data[d].nbytes
-                self.events.append(evt[d_id])
-            else:
-                if (self.batch_on_device[d] == batch_d) and \
-                        self.batch_is_on_device[d]:
-                    continue
-                if layoutDir is None and batch_d is None:
-                    if __VERBOSE__:
-                        print "{"+str(main_rank)+"}", "host->device :", \
-                            self.name, S_DIR[d], 0, 'Batch_default'
-                    # Default transfer, using the shape of the gpu_data
-                    # component and the default layout
-                    self._tmp[:np.prod(self.shape_gpu_default)] = \
-                        self._toLayoutMgrFunc[0](
-                            self.data[d][self.batch_slices_default])
-                    evt[d_id] = cl.enqueue_copy(
-                        self.cl_env.queue,
-                        self.gpu_data[d].data,
-                        self._tmp[:np.prod(self.shape_gpu_default)])
-                    self.batch_on_device[d] = 0
-                    self.batch_is_on_device[d] = True
-                    mem_transfered += self.gpu_data[d].nbytes
-                    self.events.append(evt[d_id])
-                else:
-                    batch_dir = batch_d
-                    if batch_d is None:
-                        batch_dir = 0
-                    if __VERBOSE__:
-                        print "{"+str(main_rank)+"}", "host->device :", \
-                            self.name, S_DIR[d], layoutDir, 'Batch', batch_dir
-                    if self.shape_gpu[layoutDir] is not None:
-                        self._tmp[:np.prod(self.shape_gpu[layoutDir])] = \
-                            self._toLayoutMgrFunc[layoutDir](
-                                self.data[d][
-                                    self.batch_slices[layoutDir][batch_dir]])
-                        evt[d_id] = cl.enqueue_copy(
-                            self.cl_env.queue,
-                            self.gpu_data[d].data,
-                            self._tmp[:np.prod(self.shape_gpu[layoutDir])])
-                        self.batch_on_device[d] = batch_dir
-                        self.batch_is_on_device[d] = True
-                        mem_transfered += self.gpu_data[d].nbytes
-                        self.events.append(evt[d_id])
-
+            if layoutDir is None:
+                layoutDir = 0
+            if __VERBOSE__:
+                print "{" + str(self._rank) + "}", "host->device :", \
+                    self.name, S_DIR[d], layoutDir
+            self.host_data_pinned[d][...] = \
+                self._toLayoutMgrFunc[layoutDir](self.data[d])
+            evt[d_id] = cl.enqueue_copy(
+                self.cl_env.queue, self.gpu_data[d], self.host_data_pinned[d],
+                is_blocking=False)
+            mem_transfered += self.gpu_data[d].size
+        for e in evt:
+            self.events.append(e)
         time = 0.
+        self.to_dev_size += mem_transfered / (1024. ** 3)
         if CL_PROFILE:
             for e in evt:
                 if e is not None:
                     e.wait()
                     time += (e.profile.end - e.profile.start) * 1e-9
-            self.toDevice_timer.append_time(time)
-        if __VERBOSE__:
-            if CL_PROFILE:
-                print self.mem_size, "Bytes transfered at ",
-                print "{0:.3f} GBytes/sec".format(
-                    mem_transfered / (time * 1024 ** 3))
+            self.profiler['Transfer_toDevice'] += time
+        if __VERBOSE__ and CL_PROFILE:
+            print self.mem_size, "Bytes transferred at ",
+            print "{0:.3f} GBytes/sec".format(
+                mem_transfered / (time * 1024 ** 3))
 
-    def toHost(self, component=None, layoutDir=None, batch_d=None):
+    def toHost(self, component=None, layoutDir=None):
         """
         Device to host method.
         @param component : Component to consider (Default : all components)
@@ -474,69 +356,32 @@ class GPUDiscreteField(DiscreteField):
         for d_id, d in enumerate(range_components):
             if self.layout:
                 layoutDir = d
-            if not self.isBatch:
-                if layoutDir is None:
-                    layoutDir = 0
-                if __VERBOSE__:
-                    print "{"+str(main_rank)+"}", "device->host :", \
-                        self.name, S_DIR[d], layoutDir, 'No Batch'
-                evt[d_id] = cl.enqueue_copy(self.cl_env.queue,
-                                            self._tmp,
-                                            self.gpu_data[d].data,
-                                            wait_for=self.events)
-                mem_transfered += self.gpu_data[d].nbytes
-                evt[d_id].wait()
-                shape = self._shapeFunc[layoutDir](self.data[d].shape)
-                self.data[d][...] = self._fromLayoutMgrFunc[layoutDir](
-                    self._tmp, shape)
-            elif self.batch_is_on_device[d]:
-                if layoutDir is None and batch_d is None:
-                    if __VERBOSE__:
-                        print "{"+str(main_rank)+"}", "device->host :", \
-                            self.name, S_DIR[d], 0, 'Batch_default'
-                    # Default transfer, using the shape of the gpu_data
-                    # component and the default layout
-                    evt[d_id] = cl.enqueue_copy(
-                        self.cl_env.queue,
-                        self._tmp[:np.prod(self.shape_gpu_default)],
-                        self.gpu_data[d].data,
-                        wait_for=self.events)
-                    mem_transfered += self.gpu_data[d].nbytes
-                    evt[d_id].wait()
-                    shape = self._shapeFunc[0](self.shape_gpu_default)
-                    self.data[d][self.batch_slices_default] = \
-                        self._fromLayoutMgrFunc[0](
-                            self._tmp[:np.prod(self.shape_gpu_default)],
-                            shape)
-                    self.batch_is_on_device[d] = False
-                else:
-                    if batch_d is None:
-                        batch_d = 0
-                    if __VERBOSE__:
-                        print "{"+str(main_rank)+"}", "device->host :", \
-                            self.name, S_DIR[d], layoutDir, 'Batch', batch_d
-                    evt[d_id] = cl.enqueue_copy(
-                        self.cl_env.queue,
-                        self._tmp[:np.prod(self.shape_gpu[layoutDir])],
-                        self.gpu_data[d].data,
-                        wait_for=self.events)
-                    mem_transfered += self.gpu_data[d].nbytes
-                    evt[d_id].wait()
-                    shape = self._shapeFunc[layoutDir](
-                        self.shape_gpu[layoutDir])
-                    self.data[d][self.batch_slices[layoutDir][batch_d]] = \
-                        self._fromLayoutMgrFunc[layoutDir](
-                            self._tmp[:np.prod(self.shape_gpu[layoutDir])],
-                            shape)
-                    self.batch_is_on_device[d] = False
-
+            if layoutDir is None:
+                layoutDir = 0
+            if __VERBOSE__:
+                print "{" + str(self._rank) + "}", "device->host :", \
+                    self.name, S_DIR[d], layoutDir
+            evt[d_id] = cl.enqueue_copy(self.cl_env.queue,
+                                        self.host_data_pinned[d],
+                                        self.gpu_data[d],
+                                        wait_for=self.events,
+                                        is_blocking=False)
+            mem_transfered += self.gpu_data[d].size
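+        # Copies are enqueued asynchronously; wait for each transfer to
+        # complete before rebuilding the host layout from the pinned buffer.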
+        for d_id, d in enumerate(range_components):
+            shape = self._shapeFunc[layoutDir](self.data[d].shape)
+            evt[d_id].wait()
+            self.data[d][...] = self._fromLayoutMgrFunc[layoutDir](
+                self.host_data_pinned[d], shape)
+        for e in evt:
+            self.events.append(e)
         time = 0.
+        self.to_host_size += mem_transfered / (1024. ** 3)
         if CL_PROFILE:
             for e in evt:
                 if e is not None:
                     e.wait()
                     time += (e.profile.end - e.profile.start) * 1e-9
-            self.toHost_timer.append_time(time)
+            self.profiler['Transfer_toHost'] += time
         if __VERBOSE__:
             if CL_PROFILE:
                 print self.mem_size, "Bytes transfered at ",
@@ -549,7 +394,7 @@ class GPUDiscreteField(DiscreteField):
         Resets the events list.
         """
         if __VERBOSE__:
-            print "{"+str(main_rank)+"}", "Wait events :", self.name
+            print "{" + str(self._rank) + "}", "Wait events :", self.name
         for e in self.events:
             e.wait()
         self.events = []
@@ -560,10 +405,10 @@ class GPUDiscreteField(DiscreteField):
         Resets the events list.
         """
         if __VERBOSE__:
-            print "{"+str(main_rank)+"}", "Clean events :", \
+            print "{" + str(self._rank) + "}", "Clean events :", \
                 self.name, len(self.events)
         c = cl.command_execution_status.COMPLETE
         for e in self.events:
             e.wait()
         self.events = [e for e in self.events
-                       if not e.command_execution_status is c]
+                       if e.command_execution_status != c]
diff --git a/HySoP/hysop/gpu/gpu_particle_advection.py b/HySoP/hysop/gpu/gpu_particle_advection.py
index 1afc50a54ee32b4ab4c4a919f0017a22210fbb6d..0db231d59e69884dc0dc4058ddc6ac14fd36f19f 100644
--- a/HySoP/hysop/gpu/gpu_particle_advection.py
+++ b/HySoP/hysop/gpu/gpu_particle_advection.py
@@ -8,20 +8,24 @@ from parmepy import __VERBOSE__
 from parmepy.constants import np, debug, PARMES_INDEX, S_DIR, \
     PARMES_REAL, PARMES_INTEGER
 from parmepy.methods_keys import TimeIntegrator, Interpolation, Remesh, \
-    Support, Splitting, Precision
+    Support, Splitting, Precision, MultiScale
 from parmepy.numerics.integrators.runge_kutta2 import RK2
+from parmepy.numerics.integrators.euler import Euler
 from parmepy.numerics.interpolation import Linear
 from parmepy.numerics.remeshing import L2_1
 from parmepy.operator.discrete.particle_advection import ParticleAdvection
 from parmepy.gpu import cl
-from parmepy.gpu.tools import get_opencl_environment
 from parmepy.gpu.gpu_kernel import KernelLauncher
-from parmepy.tools.timers import Timer
 from parmepy.operator.discrete.discrete import DiscreteOperator
 from parmepy.mpi import MPI
+import parmepy.default_methods as default
+import parmepy.tools.numpywrappers as npw
+from parmepy.gpu.gpu_discrete import GPUDiscreteField
+from parmepy.gpu.gpu_operator import GPUOperator
+from parmepy.tools.profiler import ftime, profile
 
 
-class GPUParticleAdvection(ParticleAdvection):
+class GPUParticleAdvection(ParticleAdvection, GPUOperator):
     """
     Particle advection operator representation on GPU.
 
@@ -29,14 +33,8 @@ class GPUParticleAdvection(ParticleAdvection):
     __metaclass__ = ABCMeta
 
     @debug
-    @abstractmethod
-    def __init__(self, velocity, advectedFields, d,
-                 part_position=None, part_advectedFields=None,
-                 platform_id=None, device_id=None,
-                 device_type=None,
-                 method=None,
-                 src=None, batch_nb=None,
-                 isMultiScale=False):
+    def __init__(self, platform_id=None, device_id=None, device_type=None,
+                 user_src=None, max_velocity=None, max_dt=None, **kwds):
         """
         Create an Advection operator.
         Work on a given field (scalar or vector) at a given velocity to compute
@@ -45,335 +43,270 @@ class GPUParticleAdvection(ParticleAdvection):
         directional splitting with non-uniform resolution across directions.
 
         @param velocity : Velocity field
-        @param advectedFields : Advected fields
+        @param fields_on_grid : Advected fields
         @param d : Direction to advect
         @param platform_id : OpenCL platform id (default = 0).
         @param device_id : OpenCL device id (default = 0).
         @param device_type : OpenCL device type (default = 'gpu').
         @param method : the method to use. {'m4prime', 'm6prime', 'm8prime',
         'l6star'}
-        @param src : User OpenCL sources.
+        @param user_src : User OpenCL sources.
         @param splittingConfig : Directional splitting configuration
         (parmepy.numerics.splitting.Splitting.__init__)
         """
-        if method is None:
-            method = {TimeIntegrator: RK2,
-                      Interpolation: Linear,
-                      Remesh: L2_1,
-                      Support: 'gpu_2k',
-                      Splitting: 'o2',
-                      Precision: PARMES_REAL}
-        ParticleAdvection.__init__(self, velocity, advectedFields, d,
-                                   part_position, part_advectedFields, method,
-                                   isMultiScale=isMultiScale)
-        self.method = method
-        if not Precision in self.method.keys():
-            self.method[Precision]=PARMES_REAL
-        self.user_gpu_src = src
-        self.num_method = None
-        self.dim = self.advectedFields[0].dimension
-        self.cl_env = get_opencl_environment(
-            platform_id=platform_id, device_id=device_id,
-            device_type=device_type, precision=self.method[Precision],
-            comm=self.advectedFields[0].topology.comm)
-        self._main_rank = self.advectedFields[0].topology.comm.Get_rank()
-        self._main_size = self.advectedFields[0].topology.comm.Get_size()
-
-        resol = self.advectedFields[0].topology.mesh.resolution
-        v_resol = self.velocity.topology.mesh.resolution
-        shape = np.ones((self.dim,), dtype=PARMES_INTEGER)
-        shape[:self.dim] = resol[...]
-        v_shape = np.ones((self.dim,), dtype=PARMES_INTEGER)
-        v_shape[:self.dim] = v_resol[...]
-        self.batch_nb = np.ones((self.dim,), dtype=PARMES_INTEGER)
-        if self.dir == self.dim - 1:
-            batch_dir = 0
-        else:
-            batch_dir = self.dim - 1
-        gh_velo = self.velocity.topology.ghosts
-        gh_adv = self.advectedFields[0].topology.ghosts
-        if batch_nb is None:
-            # Automatic computation of batch number regarding available mem
-            # Ghosts are not divided by 2: shape = (shape - 2*gh)/2 + 2*gh
-            prev_mem_use = self.cl_env.gpu_comm.allreduce(
-                self.globalMemoryUsagePreview(v_shape, shape))
-            while prev_mem_use >= self.cl_env.available_mem:
-                shape[batch_dir] /= 2
-                shape[batch_dir] += gh_adv[batch_dir]
-                v_shape[batch_dir] /= 2
-                v_shape[batch_dir] += gh_velo[batch_dir]
-                self.batch_nb[batch_dir] *= 2
-                prev_mem_use = self.cl_env.gpu_comm.allreduce(
-                    self.globalMemoryUsagePreview(v_shape, shape))
-        else:
-            # User batch number
-            shape[batch_dir] /= batch_nb
-            shape[batch_dir] += gh_adv[batch_dir]
-            v_shape[batch_dir] /= batch_nb
-            v_shape[batch_dir] += gh_velo[batch_dir]
-            self.batch_nb[batch_dir] = batch_nb
-        if np.prod(self.batch_nb) > 1 and self._main_rank == 0:
-            print "Using batch for variables:", self.batch_nb
-
-        # Functions to get the appropriate vectors for the current direction
-        self._reorderVect = lambda v: v
-        if self.dim == 2 and self.dir == 1:
-            self._reorderVect = lambda v: (v[1], v[0])
-        if self.dim == 3 and self.dir == 1:
-            self._reorderVect = lambda v: (v[1], v[0], v[2])
-        if self.dim == 3 and self.dir == 2:
-            if self._main_size == 1 and self.method[Splitting].find('o2') >= 0:
-                self._reorderVect = lambda v: (v[2], v[0], v[1])
-            else:
-                self._reorderVect = lambda v: (v[2], v[1], v[0])
+        # Set the default method if none is provided
+        if 'method' not in kwds:
+            # copy: do not mutate the shared module-level default dict
+            kwds['method'] = default.ADVECTION.copy()
+            kwds['method'][Support] = 'gpu_2k'
+
+        # init base class
+        super(GPUParticleAdvection, self).__init__(**kwds)
+        self.fields_topo = self.fields_on_grid[0].topology
+        self.velocity_topo = self.velocity.topology
+        self._comm = self.fields_topo.comm
+        self._comm_size = self._comm.Get_size()
+        self._comm_rank = self._comm.Get_rank()
+
+        # init the second base class (the previous super() call only invokes
+        # the first __init__ found along the MRO of
+        # [ParticleAdvection, GPUOperator], i.e. ParticleAdvection.__init__);
+        # GPUOperator.__init__ must therefore be called explicitly.
+        # see http://stackoverflow.com/questions/3277367/how-does-pythons-super-work-with-multiple-inheritance
+        GPUOperator.__init__(self, platform_id=platform_id,
+                             device_id=device_id,
+                             device_type=device_type,
+                             user_src=user_src, **kwds)
+
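A minimal sketch of the point made in the comment above: super() stops at the
first __init__ found in the MRO, so the second base must be initialised
explicitly when the bases do not chain cooperatively (class names below are
illustrative, not part of parmepy):

    class Base1(object):
        def __init__(self, **kwds):
            self.from_base1 = True

    class Base2(object):
        def __init__(self, **kwds):
            self.from_base2 = True

    class Child(Base1, Base2):
        def __init__(self, **kwds):
            # Runs Base1.__init__ only (first match in the MRO) ...
            super(Child, self).__init__(**kwds)
            # ... so Base2 must be initialised by hand.
            Base2.__init__(self, **kwds)

    c = Child()
    assert c.from_base1 and c.from_base2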
+        ## Work arrays for fields on particles (cpu)
+        self.fields_on_part = None
+
+        # The default is one kernel for all operations
+        self._is2kernel = False
+        if self.method[Support].find('gpu_2k') >= 0:
+            # different kernels for advection and remesh
+            self._is2kernel = True
+
+        self._isMultiScale = False
+        if MultiScale in self.method and self.method[MultiScale] is not None:
+            self._isMultiScale = True
+
+        if self._isMultiScale:
+            self._synchronize = True
+
         # Compute resolutions for kernels for each direction.
-        ## Resolution of the local mesh
-        self.resol = np.ones((3,), dtype=PARMES_INDEX)
-        self.v_resol = np.ones((3,), dtype=PARMES_INDEX)
         ## Resolution of the local mesh but reorganized regarding
         ## splitting direction:
         ## direction X : XYZ
         ## direction Y : YXZ
         ## direction Z : ZYX in parallel, ZXY in sequential.
-        self.resol_dir = np.ones((3,), dtype=PARMES_INDEX)
-        self.v_resol_dir = np.ones((3,), dtype=PARMES_INDEX)
-        self.resol[:self.dim] = shape[...]
+        self.resol_dir = npw.dim_ones((self.dim,))
+        self.v_resol_dir = npw.dim_ones((self.dim,))
+        shape = self.fields_topo.mesh.resolution
+        v_shape = self.velocity_topo.mesh.resolution
+        # Local mesh resolution
+        resol = shape.copy()
         self.resol_dir[:self.dim] = self._reorderVect(shape)
-        self.v_resol[:self.dim] = v_shape[...]
+        v_resol = v_shape.copy()
         self.v_resol_dir[:self.dim] = self._reorderVect(v_shape)
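The reordering described in the comment above amounts to the following
permutations of a resolution triplet (NX, NY, NZ); this is a hypothetical
illustration, the actual _reorderVect being set up elsewhere (presumably in
GPUOperator):

    reorder = {
        0: lambda v: (v[0], v[1], v[2]),  # direction X : XYZ layout
        1: lambda v: (v[1], v[0], v[2]),  # direction Y : YXZ layout
        2: lambda v: (v[2], v[1], v[0]),  # direction Z : ZYX (parallel)
    }
    assert reorder[2]((64, 32, 16)) == (16, 32, 64)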
 
-        # Size constants for local mesh size
-        self._size_constants = ""
-        for i in xrange(3):
-            self._size_constants += " -D NB" + S_DIR[i]
-            self._size_constants += "=" + str(self.resol[i])
-        for i in xrange(3):
-            self._size_constants += " -D V_NB" + S_DIR[i]
-            self._size_constants += "=" + str(self.v_resol[i])
-        self._size_constants += " -D V_GHOSTS_NB="
-        self._size_constants += str(self.velocity.topology.ghosts[0])
-        # Direction dependant constants
-        self._constants = [
-            " -D NB_I=NB_X -D NB_II=NB_Y -D NB_III=NB_Z " +
-            "-D V_NB_I=V_NB_X -D V_NB_II=V_NB_Y -D V_NB_III=V_NB_Z",
-            " -D NB_I=NB_Y -D NB_II=NB_X -D NB_III=NB_Z " +
-            "-D V_NB_I=V_NB_Y -D V_NB_II=V_NB_X -D V_NB_III=V_NB_Z",
-            " -D NB_I=NB_Z -D NB_II=NB_Y -D NB_III=NB_X " +
-            "-D V_NB_I=V_NB_Z -D V_NB_II=V_NB_Y -D V_NB_III=V_NB_X"]
-        if self._main_size == 1 and self.method[Splitting].find('o2') >= 0:
-            self._constants[2] = \
-                " -D NB_I=NB_Z -D NB_II=NB_X -D NB_III=NB_Y " + \
-                "-D V_NB_I=V_NB_Z -D V_NB_II=V_NB_X -D V_NB_III=V_NB_Y"
+        self._append_size_constants(resol)
+        self._append_size_constants(v_resol, prefix='V_NB')
+        self._append_size_constants(
+            [self.velocity_topo.ghosts()[self.direction]],
+            prefix='V_GHOSTS_NB', suffix=[''])
+        enum = ['I', 'II', 'III']
+        self._append_size_constants(
+            self._reorderVect(['NB' + d for d in S_DIR[:self.dim]]),
+            prefix='NB_', suffix=enum[:self.dim])
+        self._append_size_constants(
+            self._reorderVect(['V_NB' + d for d in S_DIR[:self.dim]]),
+            prefix='V_NB_', suffix=enum[:self.dim])
+
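For orientation, a sketch of the kind of OpenCL build options these calls
accumulate; the real _append_size_constants lives in GPUOperator, so the
helper below is only an assumption about its output format:

    def append_size_constants(opts, values, prefix='NB',
                              suffix=('_X', '_Y', '_Z')):
        # Append one " -D <prefix><suffix>=<value>" per entry.
        for v, s in zip(values, suffix):
            opts += " -D {0}{1}={2}".format(prefix, s, v)
        return opts

    opts = append_size_constants("", (64, 32, 16))
    assert opts == " -D NB_X=64 -D NB_Y=32 -D NB_Z=16"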
+        fields_topo = self.fields_topo
+        # Coordinates of the local origin
+        self._coord_min = npw.ones(4, dtype=self.gpu_precision)
+        self._coord_min[:self.dim] = fields_topo.mesh.origin
+
+        # Space step for fields
+        self._mesh_size = npw.ones(4, dtype=self.gpu_precision)
+        self._mesh_size[:self.dim] = self._reorderVect(
+            self.fields_topo.mesh.space_step)
+
+        # Space step for velocity
+        self._v_mesh_size = npw.ones(4, dtype=self.gpu_precision)
+        self._v_mesh_size[:self.dim] = self._reorderVect(
+            self.velocity_topo.mesh.space_step)
+
+        self._mesh_info = npw.ones((12, ))
+        self._mesh_info[:4] = self._mesh_size
+        self._mesh_info[4:8] = self._v_mesh_size
+        self._mesh_info[8] = self._coord_min[self.direction]
+        self._mesh_info[9] = 1. / self._mesh_size[0]
+        self._mesh_info[10] = 1. / self._v_mesh_size[0]
+        self._cl_mesh_info = cl.Buffer(self.cl_env.ctx, cl.mem_flags.READ_ONLY,
+                                       size=self._mesh_info.nbytes)
+        cl.enqueue_write_buffer(self.cl_env.queue,
+                                self._cl_mesh_info, self._mesh_info).wait()
+
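Layout of the 12-real _mesh_info block written just above, as set in this
constructor:

    # _mesh_info[0:4]  : mesh_size (reordered space steps, padded with 1)
    # _mesh_info[4:8]  : v_mesh_size (reordered velocity space steps)
    # _mesh_info[8]    : coord_min[direction] (local origin, split direction)
    # _mesh_info[9]    : 1 / mesh_size[0]
    # _mesh_info[10]   : 1 / v_mesh_size[0]
    # _mesh_info[11]   : unused, left at its initial value of 1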
+        assert self._coord_min.dtype == self.gpu_precision
+        assert self._mesh_size.dtype == self.gpu_precision
+        assert self._v_mesh_size.dtype == self.gpu_precision
+
+        ## OpenCL kernel build options
+        self.build_options = ""
 
+        # user-defined OpenCL sources
         self.prg = None
-        ## Object to store computational times of OpenCL kernels
-        self.kernels_timer = Timer(self)
-        self._num_locMem = None
-        precision = self.method[Precision]
-        self._kernel_cfg = self.cl_env.kernels_config[self.dim][precision]
+        self._collect_usr_cl_src(user_src)
+
+        # Set copy kernel
         self.copy = None
+        self._collect_kernels_cl_src_copy()
+
+        # Set transposition kernels
         self.transpose_xy, self.transpose_xy_r = None, None
         self.transpose_xz, self.transpose_xz_r = None, None
-        ## Global memory allocated on gpu by this operator
-        self.size_global_alloc = 0
-        ## Local memory allocated on gpu by this operator
-        self.size_local_alloc = 0
-
-    @debug
-    def setUp(self):
-        """
-        Set up.
-        Compute OpenCL work-item number and space index.\n
-        Compile OpenCL sources.
-        Allocates OpenCL buffers. Buffers are initialized with OpenCL kernels
-        if named as follows : <code>init"FieldName"</code> with
-        <code>"FieldName"</code> the field
-        name as given by user.
-        """
-        self.gpu_precision = self.cl_env.precision
-        self.coord_min = np.ones(4, dtype=self.gpu_precision)
-        self.mesh_size = np.ones(4, dtype=self.gpu_precision)
-        self.v_mesh_size = np.ones(4, dtype=self.gpu_precision)
-        self.coord_min[:self.dim] = np.asarray(
-            self.advectedFields[0].topology.mesh.origin,
-            dtype=self.gpu_precision)
-        self.mesh_size[:self.dim] = self._reorderVect(np.asarray(
-            self.advectedFields[0].topology.mesh.space_step,
-            dtype=self.gpu_precision))
-        self.v_mesh_size[:self.dim] = self._reorderVect(np.asarray(
-            self.velocity.topology.mesh.space_step,
-            dtype=self.gpu_precision))
-        self.build_options = ""
-        self._collect_usr_cl_src(self.user_gpu_src)
-        self._collect_kernels_cl_src_copy()
         self._collect_kernels_cl_src_transpositions_xy()
         if self.dim == 3:
             self._collect_kernels_cl_src_transpositions_xz()
-        self._collect_kernels_cl_src()
-        if __VERBOSE__:
-            print "=== OpenCL Buffer allocations ==="
+
+        # Set advection and remesh kernels
+        self.num_advec, self.num_remesh = None, None
+        self.num_advec_and_remesh = None
+        if self._is2kernel:
+            self._collect_kernels_cl_src_2k()
+            self._compute = self._compute_2k
+        else:
+            self._collect_kernels_cl_src_1k()
+            if self._isMultiScale:
+                self._compute = self._compute_1k_multiechelle
+            else:
+                if self.method[TimeIntegrator] is Euler:
+                    self._compute = self._compute_1k_euler_simpleechelle
+                else:
+                    self._compute = self._compute_1k_simpleechelle
+
         self._buffer_allocations()
-        if __VERBOSE__:
-            print "===\n"
-            print "=== OpenCL Buffer initialisation ==="
-        if self.dir == 0:
+        if self.direction == 0:
             self._buffer_initialisations()
 
-        # Beanching the proper _compute function
-        if self.part_advectedFields[0].nbComponents == 1:
-            self._compute = self._compute_1c
-        elif self.part_advectedFields[0].nbComponents == 2:
-            self._compute = self._compute_2c
-        elif self.part_advectedFields[0].nbComponents == 3:
-            self._compute = self._compute_3c
+        ## List of executions
+        self.exec_list = None
+        self._build_exec_list()
+
+        ## Particle initialisation OpenCL events for each field:
+        self._init_events = {self.fields_on_grid[0]: []}
+
+    @abstractmethod
+    def globalMemoryUsagePreview(self, v_shape, shape):
+        """
+        @param[in] v_shape: shape of the discretization of the velocity
+        @param[in] shape: shape of the discretization of the advected fields
+        @return size of the required memory
+        """
+        pass
 
+    def _build_exec_list(self):
         # Build execution list regarding splitting:
-        # In sequential configuration, data can stay in their direction
-        # dependant layout between directions. In parallel, data must be
-        # redistributed (between directions) in XYZ layout.
         # Splitting Strang 2nd order:
         #   3D: X(dt/2), Y(dt/2), Z(dt), Y(dt/2), X(dt/2)
         #   2D: X(dt/2), Y(dt), X(dt/2)
-        if np.prod(self.batch_nb) == 1 and self.method[Splitting] == 'o2':
-            if self._main_size > 1:
-                if self.dim == 2:
-                    self.exec_list = [
-                        [self._init_copy, self._compute],  # X(dt/2)
-                        [self._init_transpose_xy, self._compute,  # Y(dt)
-                         self._init_transpose_xy_r, self._init_copy_r],
-                        [self._init_copy, self._compute]  # X(dt/2)
-                        ]
-                elif self.dim == 3:
-                    self.exec_list = [
-                        [self._init_copy, self._compute],  # X(dt/2)
-                        [self._init_transpose_xy, self._compute,  # Y(dt/2)
-                         self._init_transpose_xy_r, self._init_copy_r],
-                        [self._init_transpose_xz, self._compute,  # Z(dt)
-                         self._init_transpose_xz_r, self._init_copy_r],
-                        [self._init_transpose_xy, self._compute,  # Y(dt/2)
-                         self._init_transpose_xy_r, self._init_copy_r],
-                        [self._init_copy, self._compute]  # X(dt/2)
-                        ]
-            else:
-                if self.dim == 2:
-                    self.exec_list = [
-                        [self._init_copy, self._compute],  # X(dt/2)
-                        [self._init_transpose_xy, self._compute],  # Y(dt)
-                        [self._init_transpose_xy, self._compute]  # X(dt/2)
-                        ]
-                elif self.dim == 3:
-                    self.exec_list = [
-                        [self._init_copy, self._compute],  # X(dt/2)
-                        [self._init_transpose_xy, self._compute],  # Y(dt/2)
-                        [self._init_transpose_xz, self._compute],  # Z(dt)
-                        [self._init_transpose_xz, self._compute],  # Y(dt/2)
-                        [self._init_transpose_xy, self._compute]  # X(dt/2)
-                        ]
-
-        # Splitting Strang 2nd order (fullHalf):
-        #   X(dt/2), Y(dt/2), Z(dt/2), Z(dt/2), Y(dt/2), X(dt/2)
-        elif np.prod(self.batch_nb) == 1 and \
-                self.method[Splitting] == 'o2_FullHalf':
-            if self._main_size > 1:
-                if self.dim == 2:
-                    self.exec_list = [
-                        [self._init_copy, self._compute],  # X(dt/2)
-                        [self._init_transpose_xy, self._compute],  # Y(dt/2)
-                        [self._init_copy, self._compute,  # Y(dt/2)
-                         self._init_transpose_xy_r, self._init_copy_r],
-                        [self._init_copy, self._compute]  # X(dt/2)
-                        ]
-                elif self.dim == 3:
-                    self.exec_list = [
-                        [self._init_copy, self._compute],  # X(dt/2)
-                        [self._init_transpose_xy, self._compute,  # Y(dt/2)
-                         self._init_transpose_xy_r, self._init_copy_r],
-                        [self._init_transpose_xz, self._compute],  # Z(dt/2)
-                        [self._init_copy, self._compute,  # Z(dt/2)
-                         self._init_transpose_xz_r, self._init_copy_r],
-                        [self._init_transpose_xy, self._compute,  # Y(dt/2)
-                         self._init_transpose_xy_r, self._init_copy_r],
-                        [self._init_copy, self._compute]  # X(dt/2)
-                        ]
-            else:
-                if self.dim == 2:
-                    self.exec_list = [
-                        [self._init_copy, self._compute],  # X(dt/2)
-                        [self._init_transpose_xy, self._compute],  # Y(dt)
-                        [self._init_copy, self._compute],  # Y(dt)
-                        [self._init_transpose_xy, self._compute]  # X(dt/2)
-                        ]
-                elif self.dim == 3:
-                    self.exec_list = [
-                        [self._init_copy, self._compute],  # X(dt/2)
-                        [self._init_transpose_xy, self._compute],  # Y(dt/2)
-                        [self._init_transpose_xz, self._compute],  # Z(dt/2)
-                        [self._init_copy, self._compute],  # Z(dt/2)
-                        [self._init_transpose_xz, self._compute],  # Y(dt/2)
-                        [self._init_transpose_xy, self._compute]  # X(dt/2)
-                        ]
-        # If batch, data are always transfered from/to host for each splitting
-        # step (memory layout is applied within transfering data)
-        elif np.prod(self.batch_nb) > 1 and \
-                self.method[Splitting] == 'o2':
+        if self.method[Splitting] == 'o2':
             if self.dim == 2:
                 self.exec_list = [
                     [self._init_copy, self._compute],  # X(dt/2)
-                    [self._init_copy, self._compute],  # Y(dt)
-                    [self._init_copy, self._compute],  # X(dt/2)
-                    ]
+                    [self._init_transpose_xy, self._compute],  # Y(dt)
+                    [self._init_transpose_xy, self._compute]  # X(dt/2)
+                ]
             elif self.dim == 3:
                 self.exec_list = [
                     [self._init_copy, self._compute],  # X(dt/2)
-                    [self._init_copy, self._compute],  # Y(dt/2)
-                    [self._init_copy, self._compute],  # Z(dt)
-                    [self._init_copy, self._compute],  # Y(dt/2)
-                    [self._init_copy, self._compute],  # X(dt/2)
-                    ]
-        elif np.prod(self.batch_nb) > 1 and \
-                self.method[Splitting] == 'o2_FullHalf':
+                    [self._init_transpose_xy, self._compute],  # Y(dt/2)
+                    [self._init_transpose_xz, self._compute],  # Z(dt)
+                    [self._init_transpose_xz, self._compute],  # Y(dt/2)
+                    [self._init_transpose_xy, self._compute]  # X(dt/2)
+                ]
+
+        # Splitting Strang 2nd order (fullHalf):
+        #   X(dt/2), Y(dt/2), Z(dt/2), Z(dt/2), Y(dt/2), X(dt/2)
+        elif self.method[Splitting] == 'o2_FullHalf':
             if self.dim == 2:
                 self.exec_list = [
                     [self._init_copy, self._compute],  # X(dt/2)
-                    [self._init_copy, self._compute],  # Y(dt/2)
-                    [self._init_copy, self._compute],  # Y(dt/2)
-                    [self._init_copy, self._compute],  # X(dt/2)
-                    ]
+                    [self._init_transpose_xy, self._compute],  # Y(dt)
+                    [self._init_copy, self._compute],  # Y(dt)
+                    [self._init_transpose_xy, self._compute]  # X(dt/2)
+                ]
             elif self.dim == 3:
                 self.exec_list = [
                     [self._init_copy, self._compute],  # X(dt/2)
-                    [self._init_copy, self._compute],  # Y(dt/2)
-                    [self._init_copy, self._compute],  # Z(dt/2)
+                    [self._init_transpose_xy, self._compute],  # Y(dt/2)
+                    [self._init_transpose_xz, self._compute],  # Z(dt/2)
                     [self._init_copy, self._compute],  # Z(dt/2)
-                    [self._init_copy, self._compute],  # Y(dt/2)
-                    [self._init_copy, self._compute],  # X(dt/2)
-                    ]
+                    [self._init_transpose_xz, self._compute],  # Y(dt/2)
+                    [self._init_transpose_xy, self._compute]  # X(dt/2)
+                ]
+        elif self.method[Splitting] == 'x_only':
+            self.exec_list = [
+                [self._init_copy, self._compute],  # X(dt)
+                #[self._init_copy, self._init_copy_r],  # X(dt)
+                ]
         else:
            raise ValueError('Not yet implemented Splitting on GPU : ' +
                              self.method[Splitting])
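The lists built here are consumed one step per call in apply() below; a
minimal sketch of that dispatch, with placeholder strings standing in for the
bound methods:

    exec_list = [
        ['init_copy', 'compute'],          # X(dt/2)
        ['init_transpose_xy', 'compute'],  # Y(dt)
        ['init_transpose_xy', 'compute'],  # X(dt/2)
    ]
    for split_id, step in enumerate(exec_list):
        for exe in step:
            # the real code calls exe(simulation, dtCoeff, split_id, old_dir)
            pass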
 
-        if np.prod(self.batch_nb) > 1:
-            self._the_apply = self._apply_batch
+    def globalMemoryUsagePreview(self, v_shape, shape):
+        if self._is2kernel:
+            r = (self.velocity.nbComponents * v_shape.prod() +
+                 (2 * self.fields_on_grid[0].nbComponents + 1) * shape.prod())
         else:
-            self._the_apply = self._apply_noBatch
-
-        if __VERBOSE__:
-            print "===\n"
+            r = (self.velocity.nbComponents * v_shape.prod() +
+                 2 * self.fields_on_grid[0].nbComponents * shape.prod())
+        return r * self.cl_env.prec_size
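A worked example of this estimate: a 3-component velocity and one advected
scalar on a common 256**3 grid in single precision (prec_size = 4), with the
two-kernel version:

    # (3 + (2 * 1 + 1)) * 256**3 = 6 * 16777216 = 100663296 reals
    # 100663296 * 4 bytes = 402653184 bytes, i.e. 384 MiB of device memory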
 
-    def _collect_kernels_cl_src(self):
+    def _configure_numerical_methods(self):
         pass
 
     def _buffer_allocations(self):
-        raise ValueError('This method must be implemented in subclasses.')
-
-    def _compute_1c(self):
-        raise ValueError('This method must be implemented in subclasses.')
-
-    def _compute_2c(self):
-        raise ValueError('This method must be implemented in subclasses.')
-
-    def _compute_3c(self):
-        raise ValueError('This method must be implemented in subclasses.')
+        """
+        Allocate OpenCL buffers for velocity and advected field.
+        """
+        ## Velocity.
+        alloc = not isinstance(self.velocity, GPUDiscreteField)
+        GPUDiscreteField.fromField(self.cl_env, self.velocity,
+                                   self.gpu_precision, simple_layout=False)
+        if alloc:
+            self.size_global_alloc += self.velocity.mem_size
+
+        ## Transported field.
+        alloc = not isinstance(self.fields_on_grid[0], GPUDiscreteField)
+        GPUDiscreteField.fromField(self.cl_env,
+                                   self.fields_on_grid[0],
+                                   self.gpu_precision,
+                                   layout=False)
+        if alloc:
+            self.size_global_alloc += self.fields_on_grid[0].mem_size
+
+        ## Fields on particles
+        self.fields_on_part = {}
+        start = 0
+        for f in self.fields_on_grid:
+            for i in xrange(start, start + f.nbComponents):
+                if type(self._rwork[i]) is np.ndarray:
+                    self._rwork[i] = \
+                        self.cl_env.global_allocation(self._rwork[i])
+            self.fields_on_part[f] = self._rwork[start: start + f.nbComponents]
+            start += f.nbComponents
+
+        if self._is2kernel:
+            ## Particles position
+            if type(self._rwork[start]) is np.ndarray:
+                self._rwork[start] = \
+                    self.cl_env.global_allocation(self._rwork[start])
+            self.part_position = self._rwork[start:start + 1]
+
+        self._work = self.fields_on_part.values()
 
     def _buffer_initialisations(self):
         """
@@ -381,9 +314,9 @@ class GPUParticleAdvection(ParticleAdvection):
         Looking for kernels named <code>init<FieldName></code>.
         """
         for gpudf in self.variables:
-            match = 'init' + gpudf.name
+            match = 'init' + '_'.join(gpudf.name.split('_')[:-1])
             # Looking for initKernel
-            if not self.prg is None:
+            if self.prg is not None:
                 for k in self.prg.all_kernels():
                     k_name = k.get_info(cl.kernel_info.FUNCTION_NAME)
                     if match.find(k_name) >= 0:
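An example of the naming convention used here, assuming discrete field names
carry a trailing discretisation tag such as '_d0':

    name = 'scalar3D_d0'
    match = 'init' + '_'.join(name.split('_')[:-1])
    assert match == 'initscalar3D'  # a kernel named initscalar3D matches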
@@ -403,237 +336,244 @@ class GPUParticleAdvection(ParticleAdvection):
         """
         Compile OpenCL sources for copy kernel.
         """
-        build_options = self.build_options
-        # copy settings
-        src, t_dim, b_rows, vec, f_space = self._kernel_cfg['copy']
-        while t_dim > self.resol_dir[0] or (self.resol_dir[0] % t_dim) > 0:
-            t_dim /= 2
-        gwi, lwi = f_space(self.resol_dir, t_dim, b_rows, vec)
-
-        # Build code
-        build_options += " -D TILE_DIM_COPY={0}".format(t_dim)
-        build_options += " -D BLOCK_ROWS_COPY={0}".format(b_rows)
-        build_options += self._size_constants + self._constants[self.dir]
-        prg = self.cl_env.build_src(
-            src,
-            build_options,
-            vec)
-        self.copy = KernelLauncher(prg.copy,
-                                   self.cl_env.queue, gwi, lwi)
+        # build_options = self.build_options
+        # # copy settings
+        # src, t_dim, b_rows, vec, f_space = self._kernel_cfg['copy']
+        # while t_dim > self.resol_dir[0] or (self.resol_dir[0] % t_dim) > 0:
+        #     t_dim /= 2
+        # gwi, lwi = f_space(self.resol_dir, t_dim, b_rows, vec)
+
+        # # Build code
+        # build_options += " -D TILE_DIM_COPY={0}".format(t_dim)
+        # build_options += " -D BLOCK_ROWS_COPY={0}".format(b_rows)
+        # build_options += self._size_constants
+        # prg = self.cl_env.build_src(
+        #     src,
+        #     build_options,
+        #     vec)
+        # self.copy = KernelLauncher(prg.copy,
+        #                            self.cl_env.queue, gwi, lwi)
+        self.copy = KernelLauncher(cl.enqueue_copy,
+                                   self.cl_env.queue)
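The compiled copy kernel is thus replaced by the runtime's built-in copy.
Assuming KernelLauncher forwards its arguments to the wrapped callable with
the queue prepended, the call made in _init_copy below amounts to:

    # self.copy.launch_sizes_in_args(p, g, wait_for=evts)
    #   ~ cl.enqueue_copy(self.cl_env.queue, p, g, wait_for=evts)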
 
     def _collect_kernels_cl_src_transpositions_xy(self):
         """Compile OpenCL sources for transpositions kernel.
         @remark : Transpositions kernels are launched as initialization.
-        Arrays are taken in their destination layout (for initialize in Y
+        Arrays are taken to their destination layout (to initialize in the Y
         direction we may come from X or Z, but the shapes are the Y ones).
+
+        This routine sets transpose_xy; transpose_xy_r is currently disabled.
         """
-        resol = self.advectedFields[0].topology.mesh.resolution
-        resol_tmp = np.empty_like(resol)
+        resol = self.fields_topo.mesh.resolution
+        resol_tmp = npw.zeros_like(resol)
 
         # XY transposition settings
-        is_XY_needed = self.dir == 1 or (self.dir == 0 and self._main_size == 1)
+        is_XY_needed = self.direction == 1 or self.direction == 0
         if is_XY_needed:
             resol_tmp[...] = resol[...]
-            if self.dir == 1:  # (XY -> YX)
+            if self.direction == 1:  # (XY -> YX)
                 resol_tmp[0] = resol[1]
                 resol_tmp[1] = resol[0]
                 ocl_cte = " -D NB_I=NB_Y -D NB_II=NB_X -D NB_III=NB_Z"
-            elif self.dir == 0:  # (YX -> XY) only for sequential
+            elif self.direction == 0:  # (YX -> XY) only for sequential
                 ocl_cte = " -D NB_I=NB_X -D NB_II=NB_Y -D NB_III=NB_Z"
-            build_options = self.build_options + self._size_constants
-            src, t_dim, b_rows, is_padding, vec, f_space = \
-                self._kernel_cfg['transpose_xy']
-            while t_dim > resol_tmp[0] or t_dim > resol_tmp[1] or \
-                    (resol_tmp[0] % t_dim) > 0 or (resol_tmp[1] % t_dim) > 0:
-                t_dim /= 2
-            gwi, lwi = f_space(resol_tmp, t_dim, b_rows, vec)
-
-            if is_padding:
-                build_options += " -D PADDING_XY=1"
-            else:
-                build_options += " -D PADDING_XY=0"
-            build_options += " -D TILE_DIM_XY={0}".format(t_dim)
-            build_options += " -D BLOCK_ROWS_XY={0}".format(b_rows)
-            build_options += ocl_cte
-            prg = self.cl_env.build_src(
-                src,
-                build_options, vec)
-
-            self.transpose_xy = KernelLauncher(
-                prg.transpose_xy, self.cl_env.queue, gwi, lwi)
-
-        is_XY_r_needed = self.dir == 1 and self._main_size > 1
-        if is_XY_r_needed:
-            # Reversed XY transposition settings (YX -> XY), only in parallel
-            resol_tmp[...] = resol[...]
-            ocl_cte = " -D NB_I=NB_X -D NB_II=NB_Y -D NB_III=NB_Z"
 
-            build_options = self.build_options + self._size_constants
-            src, t_dim, b_rows, is_padding, vec, f_space = \
-                self._kernel_cfg['transpose_xy']
-            while t_dim > resol_tmp[0] or t_dim > resol_tmp[1] or \
-                    (resol_tmp[0] % t_dim) > 0 or (resol_tmp[1] % t_dim) > 0:
-                t_dim /= 2
-            gwi, lwi = f_space(resol_tmp, t_dim, b_rows, vec)
-
-            if is_padding:
-                build_options += " -D PADDING_XY=1"
-            else:
-                build_options += " -D PADDING_XY=0"
-            build_options += " -D TILE_DIM_XY={0}".format(t_dim)
-            build_options += " -D BLOCK_ROWS_XY={0}".format(b_rows)
-            build_options += ocl_cte
-            prg = self.cl_env.build_src(
-                src,
-                build_options, vec)
+            self.transpose_xy = self._make_transpose_xy(resol_tmp, ocl_cte)
+
+        # is_XY_r_needed = self.direction == 1 and self._comm_size > 1
+        # if is_XY_r_needed:
+        #     # Reversed XY transposition settings (YX -> XY), only in parallel
+        #     resol_tmp[...] = resol[...]
+        #     ocl_cte = " -D NB_I=NB_X -D NB_II=NB_Y -D NB_III=NB_Z"
+
+        #     self.transpose_xy_r = self._make_transpose_xy(resol_tmp, ocl_cte)
+
+    def _make_transpose_xy(self, resol_tmp, ocl_cte):
+        """Build and return a KernelLauncher for the XY transposition
+        kernel, given the reordered resolution and the size constants."""
 
-            self.transpose_xy_r = KernelLauncher(
-                prg.transpose_xy, self.cl_env.queue, gwi, lwi)
+        build_options = self.build_options + self._size_constants
+        src, t_dim, b_rows, is_padding, vec, f_space = \
+            self._kernel_cfg['transpose_xy']
+        while t_dim > resol_tmp[0] or t_dim > resol_tmp[1] or \
+                (resol_tmp[0] % t_dim) > 0 or (resol_tmp[1] % t_dim) > 0:
+            t_dim /= 2
+        gwi, lwi = f_space(resol_tmp, t_dim, b_rows, vec)
+
+        if is_padding:
+            build_options += " -D PADDING_XY=1"
+        else:
+            build_options += " -D PADDING_XY=0"
+        build_options += " -D TILE_DIM_XY={0}".format(t_dim)
+        build_options += " -D BLOCK_ROWS_XY={0}".format(b_rows)
+        build_options += ocl_cte
+        prg = self.cl_env.build_src(src, build_options, vec)
+        return KernelLauncher(prg.transpose_xy, self.cl_env.queue, gwi, lwi)
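A worked example of the tile-size reduction loop above:

    t_dim, resol_tmp = 32, (48, 48, 1)
    while t_dim > resol_tmp[0] or t_dim > resol_tmp[1] or \
            (resol_tmp[0] % t_dim) > 0 or (resol_tmp[1] % t_dim) > 0:
        t_dim /= 2
    assert t_dim == 16  # 48 % 32 > 0, so the tile halves once and stops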
 
     def _collect_kernels_cl_src_transpositions_xz(self):
-        resol = self.advectedFields[0].topology.mesh.resolution
-        resol_tmp = np.empty_like(resol)
+        resol = self.fields_topo.mesh.resolution
+        resol_tmp = npw.zeros_like(resol)
 
-        is_XZ_needed = self.dir == 2 or (self.dir == 1 and self._main_size == 1)
+        is_XZ_needed = self.direction == 2 or self.direction == 1
         # XZ transposition settings
         if is_XZ_needed:
             resol_tmp[...] = resol[...]
-            if self.dir == 1:  # ZXY -> YXZ (only for seqential)
+            if self.direction == 1:  # ZXY -> YXZ (only for sequential)
                 resol_tmp[0] = resol[1]
                 resol_tmp[1] = resol[0]
                 resol_tmp[2] = resol[2]
                 ocl_cte = " -D NB_I=NB_Y -D NB_II=NB_X -D NB_III=NB_Z"
-            elif self.dir == 2:
-                if self._main_size == 1:  # YXZ -> ZXY
-                    resol_tmp[0] = resol[2]
-                    resol_tmp[1] = resol[0]
-                    resol_tmp[2] = resol[1]
-                    ocl_cte = " -D NB_I=NB_Z -D NB_II=NB_X -D NB_III=NB_Y"
-                else:  # XYZ -> ZYX
-                    resol_tmp[0] = resol[2]
-                    resol_tmp[1] = resol[1]
-                    resol_tmp[2] = resol[0]
-                    ocl_cte = " -D NB_I=NB_Z -D NB_II=NB_Y -D NB_III=NB_X"
+            elif self.direction == 2:
+                # YXZ -> ZXY
+                resol_tmp[0] = resol[2]
+                resol_tmp[1] = resol[0]
+                resol_tmp[2] = resol[1]
+                ocl_cte = " -D NB_I=NB_Z -D NB_II=NB_X -D NB_III=NB_Y"
+                # else:  # XYZ -> ZYX
+                #     resol_tmp[0] = resol[2]
+                #     resol_tmp[1] = resol[1]
+                #     resol_tmp[2] = resol[0]
+                #     ocl_cte = " -D NB_I=NB_Z -D NB_II=NB_Y -D NB_III=NB_X"
+            self.transpose_xz = self._make_transpose_xz(resol_tmp, ocl_cte)
+
+        # is_XZ_r_needed = self.direction == 2 and self._comm_size > 1
+        # if is_XZ_r_needed:
+        #     # Reversed XZ transposition settings (ZYX -> XYZ)
+        #     resol_tmp[...] = resol[...]
+        #     ocl_cte = " -D NB_I=NB_X -D NB_II=NB_Y -D NB_III=NB_Z"
+        #     self.transpose_xz_r = self._make_transpose_xz(resol_tmp, ocl_cte)
+
+    def _make_transpose_xz(self, resol_tmp, ocl_cte):
+        """Build and return a KernelLauncher for the XZ transposition
+        kernel, given the reordered resolution and the size constants."""
 
-            build_options = self.build_options + self._size_constants
-            src, t_dim, b_rows, b_deph, is_padding, vec, f_space = \
-                self._kernel_cfg['transpose_xz']
-
-            while t_dim > resol_tmp[0] or t_dim > resol_tmp[2] or \
-                    (resol_tmp[0] % t_dim) > 0 or (resol_tmp[2] % t_dim) > 0:
-                t_dim /= 2
-            gwi, lwi = f_space(resol_tmp, t_dim, b_rows, b_deph, vec)
-            if is_padding:
-                build_options += " -D PADDING_XZ=1"
-            else:
-                build_options += " -D PADDING_XZ=0"
-            build_options += " -D TILE_DIM_XZ={0}".format(t_dim)
-            build_options += " -D BLOCK_ROWS_XZ={0}".format(b_rows)
-            build_options += " -D BLOCK_DEPH_XZ={0}".format(b_deph)
-            build_options += ocl_cte
-            prg = self.cl_env.build_src(
-                src,
-                build_options,
-                vec)
-            self.transpose_xz = KernelLauncher(
-                prg.transpose_xz, self.cl_env.queue, gwi, lwi)
-
-        is_XZ_r_needed = self.dir == 2 and self._main_size > 1
-        if is_XZ_r_needed:
-            # Reversed XZ transposition settings (ZYX -> XYZ)
-            resol_tmp[...] = resol[...]
-            ocl_cte = " -D NB_I=NB_X -D NB_II=NB_Y -D NB_III=NB_Z"
-            build_options = self.build_options + self._size_constants
-            src, t_dim, b_rows, b_deph, is_padding, vec, f_space = \
-                self._kernel_cfg['transpose_xz']
-
-            while t_dim > resol_tmp[0] or t_dim > resol_tmp[2] or \
-                    (resol_tmp[0] % t_dim) > 0 or (resol_tmp[2] % t_dim) > 0:
-                t_dim /= 2
-            gwi, lwi = f_space(resol_tmp, t_dim, b_rows, b_deph, vec)
-            if is_padding:
-                build_options += " -D PADDING_XZ=1"
-            else:
-                build_options += " -D PADDING_XZ=0"
-            build_options += " -D TILE_DIM_XZ={0}".format(t_dim)
-            build_options += " -D BLOCK_ROWS_XZ={0}".format(b_rows)
-            build_options += " -D BLOCK_DEPH_XZ={0}".format(b_deph)
-            build_options += ocl_cte
-            prg = self.cl_env.build_src(
-                src,
-                build_options,
-                vec)
-            self.transpose_xz_r = KernelLauncher(
-                prg.transpose_xz, self.cl_env.queue, gwi, lwi)
+        build_options = self.build_options + self._size_constants
+        src, t_dim, b_rows, b_deph, is_padding, vec, f_space = \
+            self._kernel_cfg['transpose_xz']
+
+        while t_dim > resol_tmp[0] or t_dim > resol_tmp[2] or \
+                (resol_tmp[0] % t_dim) > 0 or (resol_tmp[2] % t_dim) > 0:
+            t_dim /= 2
+        gwi, lwi = f_space(resol_tmp, t_dim, b_rows, b_deph, vec)
+        if is_padding:
+            build_options += " -D PADDING_XZ=1"
+        else:
+            build_options += " -D PADDING_XZ=0"
+        build_options += " -D TILE_DIM_XZ={0}".format(t_dim)
+        build_options += " -D BLOCK_ROWS_XZ={0}".format(b_rows)
+        build_options += " -D BLOCK_DEPH_XZ={0}".format(b_deph)
+        build_options += ocl_cte
+        prg = self.cl_env.build_src(
+            src,
+            build_options,
+            vec)
+        return KernelLauncher(prg.transpose_xz, self.cl_env.queue, gwi, lwi)
 
     def _collect_usr_cl_src(self, usr_src):
         """
         Build user sources.
 
         """
-        build_options = self.build_options + self._size_constants
-        workItemNb, gwi, lwi = self.cl_env.get_WorkItems(self.resol_dir)
-        v_workItemNb, gwi, lwi = self.cl_env.get_WorkItems(self.v_resol_dir)
-        build_options += " -D WI_NB=" + str(workItemNb)
-        build_options += " -D V_WI_NB=" + str(v_workItemNb)
         if usr_src is not None:
-            self.prg = self.cl_env.build_src(usr_src, build_options)
+            build_options = self.build_options + self._size_constants
+            workItemNb, gwi, lwi = self.cl_env.get_WorkItems(self.resol_dir)
+            v_workItemNb, gwi, lwi = self.cl_env.get_WorkItems(self.v_resol_dir)
+            build_options += " -D WI_NB=" + str(workItemNb)
+            build_options += " -D V_WI_NB=" + str(v_workItemNb)
+            self.prg = self.cl_env.build_src(usr_src, build_options, 1)
 
-    def _apply_noBatch(self, simulation, dtCoeff, split_id, old_dir):
-        # Call OpenCL kernels
-        for exe in self.exec_list[split_id]:
-            exe(simulation, dtCoeff, split_id, old_dir)
 
-    def _apply_batch(self, simulation, dtCoeff, split_id, old_dir):
-        if split_id > self.dir:
-            ## Returning from Z -> X : Reverse batch order
-            batch_order = xrange(np.prod(self.batch_nb)-1, -1, -1)
+    def _collect_kernels_cl_src_1k(self):
+        """
+        Compile OpenCL sources for the fused advection-and-remeshing kernel.
+        """
+        build_options = self.build_options + self._size_constants
+        src, is_noBC, vec, f_space = self._kernel_cfg['advec_and_remesh']
+        gwi, lwi = f_space(self.resol_dir, vec)
+        WINb = lwi[0]
+        build_options += " -D FORMULA=" + self.method[Remesh].__name__.upper()
+        if self._isMultiScale:
+            build_options += " -D MS_FORMULA="
+            build_options += self.method[MultiScale].__name__.upper()
+        if is_noBC:
+            build_options += " -D WITH_NOBC=1"
+        build_options += " -D WI_NB=" + str(WINb)
+        build_options += " -D PART_NB_PER_WI="
+        build_options += str(self.resol_dir[0] / WINb)
+        ## Build code
+        src = [s.replace('RKN', self.method[TimeIntegrator].__name__.lower())
+               for s in src]
+        ## Euler integrator
+        if self.method[TimeIntegrator] is Euler:
+            if not self._isMultiScale:
+                src = [s for s in src if s.find(Euler.__name__.lower()) < 0]
+                src[-1] = src[-1].replace('advection', 'advection_euler')
+        prg = self.cl_env.build_src(
+            src, build_options, vec,
+            nb_remesh_components=self.fields_on_grid[0].nbComponents)
+
+        self.num_advec_and_remesh = KernelLauncher(
+            prg.advection_and_remeshing, self.cl_env.queue, gwi, lwi)
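For orientation, with RK2 time integration and L2_1 remeshing on a 256-point
leading dimension and 64 work-items per group, the options assembled above
would end with (values illustrative):

    #   " ... -D FORMULA=L2_1 -D WI_NB=64 -D PART_NB_PER_WI=4"
    # and every 'RKN' placeholder in the kernel file names becomes 'rk2'.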
+
+    def _collect_kernels_cl_src_2k(self):
+        """
+        Compile OpenCL sources for separate advection and remeshing kernels.
+        """
+        # Advection
+        build_options = self.build_options + self._size_constants
+        src, is_noBC, vec, f_space = self._kernel_cfg['advec']
+        gwi, lwi = f_space(self.resol_dir, vec)
+        WINb = lwi[0]
+        if self._isMultiScale:
+            build_options += " -D MS_FORMULA="
+            build_options += self.method[MultiScale].__name__.upper()
+            self._compute_advec = self._compute_advec_multiechelle
         else:
-            batch_order = xrange(np.prod(self.batch_nb))
-        for b in batch_order:
-            # Checking if velocity is already on device (transfered by an
-            # explicit redistribute operator)
-            toDevice_useless = \
-                self.velocity.batch_on_device[self.dir] == b \
-                and self.velocity.batch_is_on_device[self.dir]
-            if not toDevice_useless:
-                self.velocity.toDevice(component=self.dir, batch_d=b)
-
-            # Checking if advected field is already on device
-            toDevice_useless = np.all(
-                [b_id == b and is_b for b_id, is_b
-                 in zip(self.advectedFields[0].batch_on_device,
-                        self.advectedFields[0].batch_is_on_device)])
-            # For full-half splitting, for the second advection in Z
-            # direction, the last batch is already on device (batches are
-            # handled in reversed order)
-            isFullHalf = (self.dim == 3 and len(self.exec_list) == 6) or \
-                (self.dim == 2 and len(self.exec_list) == 4)
-            isFirstLastDir = \
-                (self.dim == 3 and self.dir == 2 and split_id == 2) or \
-                (self.dim == 2 and self.dir == 1 and split_id == 1)
-            isSecondLastDir = \
-                (self.dim == 3 and self.dir == 2 and split_id == 3) or \
-                (self.dim == 2 and self.dir == 1 and split_id == 2)
-            isLastBatch = b == (np.prod(self.batch_nb) - 1)
-            if not (isFullHalf and isSecondLastDir and isLastBatch):
-                 if not toDevice_useless:
-                     self.advectedFields[0].toDevice(
-                         layoutDir=self.dir, batch_d=b)
-
-            # Call OpenCL kernels
-            for exe in self.exec_list[split_id]:
-                exe(simulation, dtCoeff, split_id, old_dir)
-
-            # Send back the batch to host
-            # For full-half splitting, for the first advection in Z
-            # direction, the last batch not get back to host, it will be
-            # used in place in the second step
-            if not (isFullHalf and isFirstLastDir and isLastBatch):
-                self.advectedFields[0].toHost(
-                    layoutDir=self.dir, batch_d=b)
+            self._compute_advec = self._compute_advec_simpleechelle
+
+        if is_noBC:
+            build_options += " -D WITH_NOBC=1"
+        build_options += " -D WI_NB=" + str(WINb)
+        build_options += " -D PART_NB_PER_WI="
+        build_options += str(self.resol_dir[0] / WINb)
+        # Build code
+        src = [s.replace('RKN', self.method[TimeIntegrator].__name__.lower())
+               for s in src]
+        ## Adding remeshing weights for the multiscale advection
+        if self._isMultiScale:
+            src.insert(1, self._kernel_cfg['remesh'][0][1])
+        ## Euler integrator
+        if self.method[TimeIntegrator] is Euler:
+            if not self._isMultiScale:
+                src = [s for s in src if s.find(Euler.__name__.lower()) < 0]
+                src[-1] = src[-1].replace('advection', 'advection_euler')
+                self._compute_advec = self._compute_advec_euler_simpleechelle
+        prg = self.cl_env.build_src(
+            src,
+            build_options,
+            vec,
+            nb_remesh_components=self.fields_on_grid[0].nbComponents)
+
+        self.num_advec = KernelLauncher(
+            prg.advection_kernel, self.cl_env.queue, gwi, lwi)
+
+        # remeshing
+        build_options = self.build_options + self._size_constants
+        src, is_noBC, vec, f_space = self._kernel_cfg['remesh']
+        gwi, lwi = f_space(self.resol_dir, vec)
+        WINb = lwi[0]
+
+        build_options += " -D FORMULA=" + self.method[Remesh].__name__.upper()
+        if is_noBC:
+            build_options += " -D WITH_NOBC=1"
+        build_options += " -D WI_NB=" + str(WINb)
+        build_options += " -D PART_NB_PER_WI="
+        build_options += str(self.resol_dir[0] / WINb)
+        ## Build code
+        prg = self.cl_env.build_src(
+            src, build_options, vec,
+            nb_remesh_components=self.fields_on_grid[0].nbComponents)
+        self.num_remesh = KernelLauncher(
+            prg.remeshing_kernel, self.cl_env.queue, gwi, lwi)
 
     @debug
+    @profile
     def apply(self, simulation, dtCoeff, split_id, old_dir):
         """
         Apply operator along specified splitting direction.
@@ -642,76 +582,182 @@ class GPUParticleAdvection(ParticleAdvection):
         @param dtCoeff : coefficient applied to the time step for this stage
         @param split_id : Splitting step id
         @param old_dir : previous splitting direction
         """
-        # Calling for requirements completion
-        DiscreteOperator.apply(self, simulation)
-        ctime = MPI.Wtime()
-
         # If this is the first advection direction, wait for the gpu fields'
         # events: this avoids wait_for lists growing indefinitely. In
         # practice, all events are already terminated, so wait() just resets
         # the events list.
         if split_id == 0:
-            for v in self._work + self.advectedFields + [self.velocity]:
+            for v in self.fields_on_grid + [self.velocity]:
                 v.clean_events()
-        self._the_apply(simulation, dtCoeff, split_id, old_dir)
-        self._apply_timer.append_time(MPI.Wtime() - ctime)
+        for exe in self.exec_list[split_id]:
+            exe(simulation, dtCoeff, split_id, old_dir)
 
     def _init_copy(self, simulation, dtCoeff, split_id, old_dir):
-        wait_evt = self.advectedFields[0].events + \
-            self.part_advectedFields[0].events
-        for p, g in zip(self.advectedFields[0].gpu_data,
-                        self.part_advectedFields[0].gpu_data):
-            evt = self.copy(p.data, g.data, wait_for=wait_evt)
-            self.part_advectedFields[0].events.append(evt)
-
-    def _init_copy_r(self, simulation, dtCoeff, split_id, old_dir):
-        wait_evt = self.advectedFields[0].events + \
-            self.part_advectedFields[0].events
-        for p, g in zip(self.advectedFields[0].gpu_data,
-                        self.part_advectedFields[0].gpu_data):
-            evt = self.copy(g.data, p.data, wait_for=wait_evt)
-            self.advectedFields[0].events.append(evt)
+        wait_evt = self.fields_on_grid[0].events
+        for g, p in zip(self.fields_on_grid[0].gpu_data,
+                        self.fields_on_part[self.fields_on_grid[0]]):
+            evt = self.copy.launch_sizes_in_args(p, g, wait_for=wait_evt)
+            #evt = self.copy(g, p, wait_for=wait_evt)
+            self._init_events[self.fields_on_grid[0]].append(evt)
+
+    # def _init_copy_r(self, simulation, dtCoeff, split_id, old_dir):
+    #     wait_evt = self.fields_on_grid[0].events
+    #     for g, p in zip(self.fields_on_grid[0].gpu_data,
+    #                  self.fields_on_part[self.fields_on_grid[0]]):
+    #         evt = self.copy.launch_sizes_in_args(g, p, wait_for=wait_evt)
+    #         #evt = self.copy(p, g, wait_for=wait_evt)
+    #         self._init_events[self.fields_on_grid[0]].append(evt)
 
     def _init_transpose_xy(self, simulation, dtCoeff, split_id, old_dir):
-        wait_evt = self.advectedFields[0].events + \
-            self.part_advectedFields[0].events
-        for p, g in zip(self.advectedFields[0].gpu_data,
-                        self.part_advectedFields[0].gpu_data):
-            evt = self.transpose_xy(p.data, g.data, wait_for=wait_evt)
-            self.part_advectedFields[0].events.append(evt)
-
-    def _init_transpose_xy_r(self, simulation, dtCoeff, split_id, old_dir):
-        wait_evt = self.advectedFields[0].events + \
-            self.part_advectedFields[0].events
-        for p, g in zip(self.advectedFields[0].gpu_data,
-                        self.part_advectedFields[0].gpu_data):
-            evt = self.transpose_xy_r(p.data, g.data, wait_for=wait_evt)
-            self.part_advectedFields[0].events.append(evt)
+        wait_evt = self.fields_on_grid[0].events
+        for g, p in zip(self.fields_on_grid[0].gpu_data,
+                        self.fields_on_part[self.fields_on_grid[0]]):
+            evt = self.transpose_xy(g, p, wait_for=wait_evt)
+            self._init_events[self.fields_on_grid[0]].append(evt)
+
+    # def _init_transpose_xy_r(self, simulation, dtCoeff, split_id, old_dir):
+    #     wait_evt = self.fields_on_grid[0].events
+    #     for g, p in zip(self.fields_on_grid[0].gpu_data,
+    #                     self.fields_on_part[self.fields_on_grid[0]]):
+    #         evt = self.transpose_xy_r(p, g, wait_for=wait_evt)
+    #         self._init_events[self.fields_on_grid[0]].append(evt)
 
     def _init_transpose_xz(self, simulation, dtCoeff, split_id, old_dir):
-        wait_evt = self.advectedFields[0].events + \
-            self.part_advectedFields[0].events
-        for p, g in zip(self.advectedFields[0].gpu_data,
-                        self.part_advectedFields[0].gpu_data):
-            evt = self.transpose_xz(p.data, g.data, wait_for=wait_evt)
-            self.part_advectedFields[0].events.append(evt)
-
-    def _init_transpose_xz_r(self, simulation, dtCoeff, split_id, old_dir):
-        wait_evt = self.advectedFields[0].events + \
-            self.part_advectedFields[0].events
-        for p, g in zip(self.advectedFields[0].gpu_data,
-                        self.part_advectedFields[0].gpu_data):
-            evt = self.transpose_xz_r(p.data, g.data, wait_for=wait_evt)
-            self.part_advectedFields[0].events.append(evt)
+        wait_evt = self.fields_on_grid[0].events
+        for g, p in zip(self.fields_on_grid[0].gpu_data,
+                        self.fields_on_part[self.fields_on_grid[0]]):
+            evt = self.transpose_xz(g, p, wait_for=wait_evt)
+            self._init_events[self.fields_on_grid[0]].append(evt)
+
+    # def _init_transpose_xz_r(self, simulation, dtCoeff, split_id, old_dir):
+    #     wait_evt = self.fields_on_grid[0].events
+    #     for g, p in zip(self.fields_on_grid[0].gpu_data,
+    #                     self.fields_on_part[self.fields_on_grid[0]]):
+    #         evt = self.transpose_xz_r(p, g, wait_for=wait_evt)
+    #         self._init_events[self.fields_on_grid[0]].append(evt)
+
+    def _compute_advec_euler_simpleechelle(self, simulation, dtCoeff, split_id, old_dir):
+        dt = simulation.timeStep * dtCoeff
+        wait_evts = self.velocity.events + \
+            self._init_events[self.fields_on_grid[0]]
+        # Advection
+        evt = self.num_advec(
+            self.velocity.gpu_data[self.direction],
+            self.part_position[0],
+            self.gpu_precision(dt),
+            self._cl_mesh_info,
+            wait_for=wait_evts)
+        self._init_events[self.fields_on_grid[0]].append(evt)
+
+    def _compute_advec_simpleechelle(self, simulation, dtCoeff, split_id, old_dir):
+        dt = simulation.timeStep * dtCoeff
+        wait_evts = self.velocity.events + \
+            self._init_events[self.fields_on_grid[0]]
+        # Advection
+        evt = self.num_advec(
+            self.velocity.gpu_data[self.direction],
+            self.part_position[0],
+            self.gpu_precision(dt),
+            self._cl_mesh_info,
+            wait_for=wait_evts)
+        self._init_events[self.fields_on_grid[0]].append(evt)
+
+    def _compute_advec_multiechelle(self, simulation, dtCoeff, split_id, old_dir):
+        dt = simulation.timeStep * dtCoeff
+        wait_evts = self.velocity.events + \
+            self._init_events[self.fields_on_grid[0]]
+        # Advection
+        evt = self.num_advec(
+            self.velocity.gpu_data[self.direction],
+            self.part_position[0],
+            self.gpu_precision(dt),
+            self.gpu_precision(1. / self._v_mesh_size[1]),
+            self.gpu_precision(1. / self._v_mesh_size[2]),
+            self._cl_mesh_info,
+            wait_for=wait_evts)
+        self._init_events[self.fields_on_grid[0]].append(evt)
+
+    def _compute_2k(self, simulation, dtCoeff, split_id, old_dir):
+        self._compute_advec(simulation, dtCoeff, split_id, old_dir)
+        wait_evts = self._init_events[self.fields_on_grid[0]] + \
+            self.fields_on_grid[0].events
+        nbc = self.fields_on_grid[0].nbComponents
+        evt = self.num_remesh(*tuple(
+            [self.part_position[0], ] +
+            [self.fields_on_part[self.fields_on_grid[0]][i]
+             for i in xrange(nbc)] +
+            [self.fields_on_grid[0].gpu_data[i] for i in xrange(nbc)] +
+            [self._cl_mesh_info, ]),
+                              wait_for=wait_evts)
+        self.fields_on_grid[0].events.append(evt)
+        self._init_events[self.fields_on_grid[0]] = []
+
+    def _compute_1k_multiechelle(self, simulation, dtCoeff, split_id, old_dir):
+        dt = simulation.timeStep * dtCoeff
+        wait_evts = self.velocity.events + \
+            self._init_events[self.fields_on_grid[0]] + \
+            self.fields_on_grid[0].events
+        nbc = self.fields_on_grid[0].nbComponents
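+        # Fused kernel argument layout: velocity, particle buffers,
+        # grid buffers, then the scalar arguments (dt, inverse velocity
+        # mesh sizes) and the mesh description.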
+        evt = self.num_advec_and_remesh(*tuple(
+            [self.velocity.gpu_data[self.direction], ] +
+            [self.fields_on_part[self.fields_on_grid[0]][i]
+             for i in xrange(nbc)] +
+            [self.fields_on_grid[0].gpu_data[i] for i in xrange(nbc)] +
+            [self.gpu_precision(dt),
+             self.gpu_precision(1. / self._v_mesh_size[1]),
+             self.gpu_precision(1. / self._v_mesh_size[2]),
+             self._cl_mesh_info]),
+                                        wait_for=wait_evts)
+        self.fields_on_grid[0].events.append(evt)
+        self._init_events[self.fields_on_grid[0]] = []
+
+    def _compute_1k_simpleechelle(self, simulation, dtCoeff, split_id, old_dir):
+        dt = simulation.timeStep * dtCoeff
+        wait_evts = self.velocity.events + \
+            self._init_events[self.fields_on_grid[0]] + \
+            self.fields_on_grid[0].events
+        nbc = self.fields_on_grid[0].nbComponents
+        evt = self.num_advec_and_remesh(*tuple(
+            [self.velocity.gpu_data[self.direction], ] +
+            [self.fields_on_part[self.fields_on_grid[0]][i]
+             for i in xrange(nbc)] +
+            [self.fields_on_grid[0].gpu_data[i] for i in xrange(nbc)] +
+            [self.gpu_precision(dt), self._cl_mesh_info]),
+                                        wait_for=wait_evts)
+        self.fields_on_grid[0].events.append(evt)
+        self._init_events[self.fields_on_grid[0]] = []
+
+    def _compute_1k_euler_simpleechelle(self, simulation, dtCoeff, split_id, old_dir):
+        dt = simulation.timeStep * dtCoeff
+        wait_evts = self.velocity.events + \
+            self._init_events[self.fields_on_grid[0]] + \
+            self.fields_on_grid[0].events
+        nbc = self.fields_on_grid[0].nbComponents
+        evt = self.num_advec_and_remesh(*tuple(
+            [self.velocity.gpu_data[self.direction], ] +
+            [self.fields_on_part[self.fields_on_grid[0]][i]
+             for i in xrange(nbc)] +
+            [self.fields_on_grid[0].gpu_data[i] for i in xrange(nbc)] +
+            [self.gpu_precision(dt), self._cl_mesh_info]),
+                                        wait_for=wait_evts)
+        self.fields_on_grid[0].events.append(evt)
+        self._init_events[self.fields_on_grid[0]] = []
+
+    def get_profiling_info(self):
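+        # Accumulate per-kernel OpenCL profiling entries, skipping
+        # kernels that were not built for this configuration.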
+        for k in [self.copy, self.transpose_xy, self.transpose_xy_r,
+                  self.transpose_xz, self.transpose_xz_r,
+                  self.num_advec_and_remesh,
+                  self.num_advec, self.num_remesh]:
+            if k is not None:
+                for p in k.profile:
+                    self.profiler += p
 
     @debug
     def finalize(self):
         """
-        Free OpenCL memory.
+        Clean up, if required.
         """
-        for k in [self.copy, self.transpose_xy, self.transpose_xy_r,
-                  self.transpose_xz, self.transpose_xz_r]:
-            if k is not None:
-                for f_timer in k.f_timer:
-                    self.kernels_timer.addFunctionTimer(f_timer)
-        self.timer.addSubTimer(self.kernels_timer, 'Details:')
-        ParticleAdvection.finalize(self)
+        pass
+        # for w in self._rwork:
+        #     self.cl_env.global_deallocation(w)
+        # self.cl_env.global_deallocation(self._cl_mesh_info)
diff --git a/HySoP/hysop/gpu/gpu_particle_advection_1k.py b/HySoP/hysop/gpu/gpu_particle_advection_1k.py
new file mode 100644
index 0000000000000000000000000000000000000000..714601524427429781fb65276c0aa49c10634c90
--- /dev/null
+++ b/HySoP/hysop/gpu/gpu_particle_advection_1k.py
@@ -0,0 +1,179 @@
+"""
+@file gpu_particle_advection_1k.py
+
+Discrete advection representation on GPU, using a single fused
+advection-and-remeshing kernel.
+"""
+from parmepy.gpu.gpu_particle_advection import GPUParticleAdvection
+from parmepy.fields.continuous import Field
+from parmepy.gpu.gpu_discrete import GPUDiscreteField
+from parmepy.gpu.gpu_kernel import KernelLauncher
+
+
+class GPUParticleAdvection1k(GPUParticleAdvection):
+    """
+    Particle advection operator representation on GPU with a single kernel
+    for computing advection and remeshing.
+    """
+
+    def globalMemoryUsagePreview(self, v_shape, shape):
+        r = (self.velocity.nbComponents * v_shape.prod() +
+             2 * self.fields_on_grid[0].nbComponents * shape.prod())
+        return r * self.cl_env.prec_size
+
+    def _buffer_allocations(self):
+        """
+        Allocate OpenCL buffers for the velocity and advected fields,
+        plus one buffer for the advected field values on particles.
+        """
+        ## Velocity.
+        alloc = not isinstance(self.velocity, GPUDiscreteField)
+        GPUDiscreteField.fromField(self.cl_env, self.velocity,
+                                   self.gpu_precision,
+                                   batch_nb=self.batch_nb, batch_d=self.direction)
+        if alloc:
+            self.size_global_alloc += self.velocity.mem_size
+
+        ## Transported field.
+        alloc = not isinstance(self.fields_on_grid[0], GPUDiscreteField)
+        GPUDiscreteField.fromField(self.cl_env,
+                                   self.fields_on_grid[0],
+                                   self.gpu_precision,
+                                   layout=False,
+                                   batch_nb=self.batch_nb, batch_d=self.direction)
+        if alloc:
+            self.size_global_alloc += self.fields_on_grid[0].mem_size
+
+        ## Result scalar
+        if self.fields_on_part is None:
+            self.fields_on_part = [
+                Field(self.fields_on_grid[0].topology.domain,
+                      name="Particle_AdvectedFields",
+                      isVector=self.fields_on_grid[0].isVector
+                      ).discretize(self.fields_on_grid[0].topology)]
+        alloc = not isinstance(self.fields_on_part[0], GPUDiscreteField)
+        GPUDiscreteField.fromField(
+            self.cl_env, self.fields_on_part[0],
+            self.gpu_precision,
+            layout=False,
+            batch_nb=self.batch_nb, batch_d=self.direction)
+        if alloc:
+            self.size_global_alloc += self.fields_on_part[0].mem_size
+
+        is_batch = (self.velocity.isBatch and
+                    self.fields_on_grid[0].isBatch and
+                    self.fields_on_part[0].isBatch)
+        is_not_batch = (not self.velocity.isBatch and
+                        not self.fields_on_grid[0].isBatch and
+                        not self.fields_on_part[0].isBatch)
+        identic_batch = True
+        if is_batch:
+            for vb, adb, padb in zip(
+                    self.velocity.batch_nb[self.direction],
+                    self.fields_on_grid[0].batch_nb[self.direction],
+                    self.fields_on_part[0].batch_nb[self.direction]):
+                identic_batch = identic_batch and (vb == adb and vb == padb)
+
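+        # Invariant checked below: either all three fields are batched
+        # with identical batch numbers in this direction, or none of
+        # them is; any mixed state is an error.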
+        if not ((is_batch or is_not_batch) and
+                (not is_batch or (is_batch and identic_batch))):
+            raise RuntimeError("In operator advection on GPU, automatic " +
+                               "batch number computations fails: (different " +
+                               "batch number for same variables use in " +
+                               "different advection operators). " +
+                               "User must give an explicit and identical " +
+                               "batch_nb parameter for all GPU advection " +
+                               "operators")
+
+        self.variables = [self.fields_on_grid[0], self.velocity,
+                          self.fields_on_part[0]]
+        self._work = self.fields_on_part
+
+    def _collect_kernels_cl_src(self):
+        """
+        Compile OpenCL sources for the fused advection-and-remeshing kernel.
+        """
+        from parmepy.methods_keys import Remesh, MultiScale, TimeIntegrator
+        build_options = self.build_options + self._size_constants
+        src, is_noBC, vec, f_space = self._kernel_cfg['advec_and_remesh']
+        gwi, lwi = f_space(self.resol_dir, vec)
+        WINb = lwi[0]
+        build_options += " -D FORMULA=" + self.method[Remesh].__name__.upper()
+        if MultiScale in self.method:
+            if self.method[MultiScale] is not None:
+                build_options += " -D MS_FORMULA="
+                build_options += self.method[MultiScale].__name__.upper()
+        if is_noBC:
+            build_options += " -D WITH_NOBC=1"
+        build_options += " -D WI_NB=" + str(WINb)
+        build_options += " -D PART_NB_PER_WI="
+        build_options += str(self.resol_dir[0] / WINb)
+        build_options += self._constants[self.direction]
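+        # For illustration only (hypothetical values), the assembled
+        # options may end up looking like:
+        #   "-D FORMULA=L4_2 -D WI_NB=64 -D PART_NB_PER_WI=4 ..."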
+        # Build code
+        src = [s.replace('RKN', self.method[TimeIntegrator].__name__.lower())
+               for s in src]
+        prg = self.cl_env.build_src(
+            src, build_options, vec,
+            nb_remesh_components=self.fields_on_part[0].nbComponents)
+
+        self.num_advec_and_remesh = KernelLauncher(
+            prg.advection_and_remeshing, self.cl_env.queue, gwi, lwi)
+
+    def _compute_1c(self, simulation, dtCoeff, split_id, old_dir):
+        dt = simulation.timeStep * dtCoeff
+        wait_evts = self.velocity.events + \
+            self.fields_on_part[0].events + \
+            self.fields_on_grid[0].events
+        evt = self.num_advec_and_remesh(
+            self.velocity.gpu_data[self.direction].data,
+            self.fields_on_part[0].gpu_data[0].data,
+            self.fields_on_grid[0].gpu_data[0].data,
+            self._num_locMem[0], self._num_locMem[1],
+            self.gpu_precision(dt),
+            self.coord_min[self.direction],
+            self.mesh_size, self.v_mesh_size,
+            wait_for=wait_evts)
+        self.fields_on_grid[0].events.append(evt)
+
+    def _compute_2c(self, simulation, dtCoeff, split_id, old_dir):
+        dt = simulation.timeStep * dtCoeff
+        wait_evts = self.velocity.events + \
+            self.fields_on_part[0].events + \
+            self.fields_on_grid[0].events
+        evt = self.num_advec_and_remesh(
+            self.velocity.gpu_data[self.direction].data,
+            self.fields_on_part[0].gpu_data[0].data,
+            self.fields_on_part[0].gpu_data[1].data,
+            self.fields_on_grid[0].gpu_data[0].data,
+            self.fields_on_grid[0].gpu_data[1].data,
+            self._num_locMem[0], self._num_locMem[1], self._num_locMem[2],
+            self.gpu_precision(dt),
+            self.coord_min[self.direction],
+            self.mesh_size, self.v_mesh_size,
+            wait_for=wait_evts)
+        self.fields_on_grid[0].events.append(evt)
+
+    def _compute_3c(self, simulation, dtCoeff, split_id, old_dir):
+        dt = simulation.timeStep * dtCoeff
+        wait_evts = self.velocity.events + \
+            self.fields_on_part[0].events + \
+            self.fields_on_grid[0].events
+        evt = self.num_advec_and_remesh(
+            self.velocity.gpu_data[self.direction].data,
+            self.fields_on_part[0].gpu_data[0].data,
+            self.fields_on_part[0].gpu_data[1].data,
+            self.fields_on_part[0].gpu_data[2].data,
+            self.fields_on_grid[0].gpu_data[0].data,
+            self.fields_on_grid[0].gpu_data[1].data,
+            self.fields_on_grid[0].gpu_data[2].data,
+            self._num_locMem[0], self._num_locMem[1],
+            self._num_locMem[2], self._num_locMem[3],
+            self.gpu_precision(dt),
+            self.coord_min[self.direction],
+            self.mesh_size, self.v_mesh_size,
+            wait_for=wait_evts)
+        self.fields_on_grid[0].events.append(evt)
+
+    def finalize(self):
+        if self.num_advec_and_remesh.f_timer is not None:
+            for f_timer in self.num_advec_and_remesh.f_timer:
+                self.kernels_timer.addFunctionTimer(f_timer)
+        GPUParticleAdvection.finalize(self)
diff --git a/HySoP/hysop/gpu/gpu_particle_advection_2k.py b/HySoP/hysop/gpu/gpu_particle_advection_2k.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e57dd17646b487eb09d70fb2a42ce802a132776
--- /dev/null
+++ b/HySoP/hysop/gpu/gpu_particle_advection_2k.py
@@ -0,0 +1,225 @@
+"""
+@file gpu_particle_advection_2k.py
+
+Discrete advection representation on GPU, using separate advection
+and remeshing kernels.
+"""
+from parmepy.gpu.gpu_particle_advection import GPUParticleAdvection
+from parmepy.fields.continuous import Field
+from parmepy.gpu.gpu_discrete import GPUDiscreteField
+from parmepy.gpu.gpu_kernel import KernelLauncher
+
+
+class GPUParticleAdvection2k(GPUParticleAdvection):
+    """
+    Particle advection operator representation on GPU, with separate
+    kernels for advection and remeshing.
+    """
+
+    def globalMemoryUsagePreview(self, v_shape, shape):
+        r = (self.velocity.nbComponents * v_shape.prod() +
+             (2 * self.fields_on_grid[0].nbComponents + 1) * shape.prod())
+        return r * self.cl_env.prec_size
+
+    def _buffer_allocations(self):
+        """
+        Allocate OpenCL buffers for the velocity and advected fields,
+        one buffer for the advected field values on particles,
+        and one buffer for the particle positions.
+        """
+        ## Velocity.
+        alloc = not isinstance(self.velocity, GPUDiscreteField)
+        GPUDiscreteField.fromField(self.cl_env, self.velocity,
+                                   self.gpu_precision,
+                                   batch_nb=self.batch_nb,
+                                   batch_d=self.direction)
+        if alloc:
+            self.size_global_alloc += self.velocity.mem_size
+
+        ## Transported field.
+        alloc = not isinstance(self.fields_on_grid[0], GPUDiscreteField)
+        GPUDiscreteField.fromField(self.cl_env, self.fields_on_grid[0],
+                                   self.gpu_precision,
+                                   layout=False,
+                                   batch_nb=self.batch_nb, batch_d=self.direction)
+        if alloc:
+            self.size_global_alloc += self.fields_on_grid[0].mem_size
+
+        ## Particle position
+        if self.part_position is None:
+            self.part_position = \
+                Field(self.fields_on_grid[0].topology.domain,
+                      name="Particle_Position",
+                      isVector=False
+                      ).discretize(self.fields_on_grid[0].topology)
+        alloc = not isinstance(self.part_position, GPUDiscreteField)
+        GPUDiscreteField.fromField(self.cl_env, self.part_position,
+                                   self.gpu_precision, layout=False,
+                                   batch_nb=self.batch_nb, batch_d=self.direction)
+        if alloc:
+            self.size_global_alloc += self.part_position.mem_size
+
+        ## Result scalar
+        if self.fields_on_part is None:
+            self.fields_on_part = [
+                Field(self.fields_on_grid[0].topology.domain,
+                      name="Particle_AdvectedFields",
+                      isVector=self.fields_on_grid[0].isVector
+                      ).discretize(self.fields_on_grid[0].topology)]
+        alloc = not isinstance(self.fields_on_part[0], GPUDiscreteField)
+        GPUDiscreteField.fromField(self.cl_env, self.fields_on_part[0],
+                                   self.gpu_precision, layout=False,
+                                   batch_nb=self.batch_nb, batch_d=self.direction)
+        if alloc:
+            self.size_global_alloc += self.fields_on_part[0].mem_size
+
+        is_batch = (self.velocity.isBatch and
+                    self.fields_on_grid[0].isBatch and
+                    self.part_position.isBatch and
+                    self.fields_on_part[0].isBatch)
+        is_not_batch = (not self.velocity.isBatch and
+                        not self.fields_on_grid[0].isBatch and
+                        not self.part_position.isBatch and
+                        not self.fields_on_part[0].isBatch)
+        identic_batch = True
+        if is_batch:
+            for vb, adb, pposb, padb in zip(
+                    self.velocity.batch_nb[self.direction],
+                    self.fields_on_grid[0].batch_nb[self.direction],
+                    self.part_position.batch_nb[self.direction],
+                    self.fields_on_part[0].batch_nb[self.direction]):
+                identic_batch = identic_batch and \
+                    (vb == adb and vb == padb and vb == pposb)
+
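+        # Same invariant as in the one-kernel version: all four fields
+        # must be consistently batched (with identical batch numbers)
+        # or not batched at all.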
+        if not ((is_batch or is_not_batch) and
+                (not is_batch or (is_batch and identic_batch))):
+            raise RuntimeError("In operator advection on GPU, automatic " +
+                               "batch number computations fails: (different " +
+                               "batch number for same variables use in " +
+                               "different advection operators). " +
+                               "User must give an explicit and identical " +
+                               "batch_nb parameter for all GPU advection " +
+                               "operators")
+
+        self.variables = [self.fields_on_grid[0], self.velocity,
+                          self.part_position, self.fields_on_part[0]]
+        self._work = self.fields_on_part + [self.part_position]
+
+    def _collect_kernels_cl_src(self):
+        """
+        Compile OpenCL sources for the advection and remeshing kernels.
+        """
+        from parmepy.methods_keys import TimeIntegrator, Remesh, MultiScale
+        # Advection
+        build_options = self.build_options + self._size_constants
+        src, is_noBC, vec, f_space = self._kernel_cfg['advec']
+        gwi, lwi = f_space(self.resol_dir, vec)
+        WINb = lwi[0]
+        if MultiScale in self.method:
+            if self.method[MultiScale] is not None:
+                build_options += " -D MS_FORMULA="
+                build_options += self.method[MultiScale].__name__.upper()
+
+        if is_noBC:
+            build_options += " -D WITH_NOBC=1"
+        build_options += " -D WI_NB=" + str(WINb)
+        build_options += self._constants[self.direction]
+        # Build code
+        src = [s.replace('RKN', self.method[TimeIntegrator].__name__.lower())
+               for s in src]
+        prg = self.cl_env.build_src(
+            src,
+            build_options,
+            vec,
+            nb_remesh_components=self.fields_on_part[0].nbComponents)
+
+        self.num_advec = KernelLauncher(
+            prg.advection_kernel, self.cl_env.queue, gwi, lwi)
+
+        # Remeshing
+        build_options = self.build_options + self._size_constants
+        src, is_noBC, vec, f_space = self._kernel_cfg['remesh']
+        gwi, lwi = f_space(self.resol_dir, vec)
+        WINb = lwi[0]
+
+        build_options += " -D FORMULA=" + self.method[Remesh].__name__.upper()
+        if is_noBC:
+            build_options += " -D WITH_NOBC=1"
+        build_options += " -D WI_NB=" + str(WINb)
+        build_options += " -D PART_NB_PER_WI="
+        build_options += str(self.resol_dir[0] / WINb)
+        build_options += self._constants[self.direction]
+        # Build code
+        prg = self.cl_env.build_src(
+            src, build_options, vec,
+            nb_remesh_components=self.fields_on_part[0].nbComponents)
+        self.num_remesh = KernelLauncher(
+            prg.remeshing_kernel, self.cl_env.queue, gwi, lwi)
+
+    def _compute_advec(self, simulation, dtCoeff, split_id, old_dir):
+        dt = simulation.timeStep * dtCoeff
+        wait_evts = self.velocity.events + self.part_position.events
+        # Advection
+        evt = self.num_advec(
+            self.velocity.gpu_data[self.direction].data,
+            self.part_position.gpu_data[0].data,
+            self._num_locMem[0],
+            self.gpu_precision(dt),
+            self.coord_min[self.direction],
+            self.mesh_size, self.v_mesh_size,
+            wait_for=wait_evts)
+        self.part_position.events.append(evt)
+
+    def _compute_1c(self, simulation, dtCoeff, split_id, old_dir):
+        self._compute_advec(simulation, dtCoeff, split_id, old_dir)
+        wait_evts = self.part_position.events + self.fields_on_grid[0].events
+        evt = self.num_remesh(
+            self.part_position.gpu_data[0].data,
+            self.fields_on_part[0].gpu_data[0].data,
+            self.fields_on_grid[0].gpu_data[0].data,
+            self._num_locMem[1],
+            self.coord_min[self.direction],
+            self.mesh_size[self.direction],
+            wait_for=wait_evts)
+        self.fields_on_grid[0].events.append(evt)
+
+    def _compute_2c(self, simulation, dtCoeff, split_id, old_dir):
+        self._compute_advec(simulation, dtCoeff, split_id, old_dir)
+        wait_evts = self.part_position.events + self.fields_on_grid[0].events
+        evt = self.num_remesh(
+            self.part_position.gpu_data[0].data,
+            self.fields_on_part[0].gpu_data[0].data,
+            self.fields_on_part[0].gpu_data[1].data,
+            self.fields_on_grid[0].gpu_data[0].data,
+            self.fields_on_grid[0].gpu_data[1].data,
+            self._num_locMem[1],
+            self._num_locMem[2],
+            self.coord_min[self.direction],
+            self.mesh_size[self.direction],
+            wait_for=wait_evts)
+        self.fields_on_grid[0].events.append(evt)
+
+    def _compute_3c(self, simulation, dtCoeff, split_id, old_dir):
+        self._compute_advec(simulation, dtCoeff, split_id, old_dir)
+        wait_evts = self.part_position.events + self.fields_on_grid[0].events
+        evt = self.num_remesh(
+            self.part_position.gpu_data[0].data,
+            self.fields_on_part[0].gpu_data[0].data,
+            self.fields_on_part[0].gpu_data[1].data,
+            self.fields_on_part[0].gpu_data[2].data,
+            self.fields_on_grid[0].gpu_data[0].data,
+            self.fields_on_grid[0].gpu_data[1].data,
+            self.fields_on_grid[0].gpu_data[2].data,
+            self._num_locMem[1],
+            self._num_locMem[2],
+            self._num_locMem[3],
+            self.coord_min[self.direction],
+            self.mesh_size[self.direction],
+            wait_for=wait_evts)
+        self.fields_on_grid[0].events.append(evt)
+
+    def finalize(self):
+        if self.num_advec.f_timer is not None:
+            for f_timer in self.num_advec.f_timer:
+                self.kernels_timer.addFunctionTimer(f_timer)
+            for f_timer in self.num_remesh.f_timer:
+                self.kernels_timer.addFunctionTimer(f_timer)
+        GPUParticleAdvection.finalize(self)
diff --git a/HySoP/hysop/gpu/tests/test_advection_nullVelocity.py b/HySoP/hysop/gpu/tests/test_advection_nullVelocity.py
index 087fd71d56333d011637d17c6c0ec5b8e0f0f7c2..346375b44119917732b08f90954f22fd839c7e2a 100644
--- a/HySoP/hysop/gpu/tests/test_advection_nullVelocity.py
+++ b/HySoP/hysop/gpu/tests/test_advection_nullVelocity.py
@@ -12,17 +12,18 @@ from parmepy.methods_keys import TimeIntegrator, Interpolation, Remesh, \
 from parmepy.numerics.integrators.runge_kutta2 import RK2
 from parmepy.numerics.interpolation import Linear
 from parmepy.numerics.remeshing import L2_1, L4_2, L6_3, M8Prime
+from parmepy.tools.parameters import Discretization
 
 
 def setup_2D():
-    box = Box(2, length=[1., 1.], origin=[0., 0.])
+    box = Box(length=[1., 1.], origin=[0., 0.])
     scal = Field(domain=box, name='Scalar')
     velo = Field(domain=box, name='Velocity', isVector=True)
     return scal, velo
 
 
 def setup_3D():
-    box = Box(3, length=[1., 1., 1.], origin=[0., 0., 0.])
+    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
     scal = Field(domain=box, name='Scalar')
     velo = Field(domain=box, name='Velocity', isVector=True)
     return scal, velo
@@ -30,7 +31,7 @@ def setup_3D():
 
 def assertion_2D(scal, velo, advec):
     advec.discretize()
-    advec.setUp()
+    advec.setup()
 
     scal_d = scal.discreteFields.values()[0]
     velo_d = velo.discreteFields.values()[0]
@@ -55,8 +56,8 @@ def assertion_2D(scal, velo, advec):
 def assertion_2D_withPython(scal, velo, advec, advec_py):
     advec.discretize()
     advec_py.discretize()
-    advec.setUp()
-    advec_py.setUp()
+    advec.setup()
+    advec_py.setup()
 
     scal_d = scal.discreteFields.values()[0]
     velo_d = velo.discreteFields.values()[0]
@@ -83,7 +84,7 @@ def assertion_2D_withPython(scal, velo, advec, advec_py):
 
 def assertion_3D(scal, velo, advec):
     advec.discretize()
-    advec.setUp()
+    advec.setup()
 
     scal_d = scal.discreteFields.values()[0]
     velo_d = velo.discreteFields.values()[0]
@@ -110,8 +111,8 @@ def assertion_3D(scal, velo, advec):
 def assertion_3D_withPython(scal, velo, advec, advec_py):
     advec.discretize()
     advec_py.discretize()
-    advec.setUp()
-    advec_py.setUp()
+    advec.setup()
+    advec_py.setup()
 
     scal_d = scal.discreteFields.values()[0]
     velo_d = velo.discreteFields.values()[0]
@@ -136,6 +137,9 @@ def assertion_3D_withPython(scal, velo, advec, advec_py):
     return np.allclose(py_res, scal_d.data[0])
 
 
+d2d = Discretization([33, 33])
+d3d = Discretization([17, 17, 17])
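+# d2d and d3d are shared by all the tests below
+# (33 points per direction in 2D, 17 in 3D).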
+
 # M6 tests
 def test_2D_m6_1k():
     """
@@ -144,9 +148,7 @@ def test_2D_m6_1k():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L4_2,
@@ -154,9 +156,7 @@ def test_2D_m6_1k():
                               Splitting: 'o2',
                               Precision: PARMES_REAL}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [33, 33],
-                                      scal: [33, 33]},
+    advec_py = Advection(velo, scal, discretization=d2d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L4_2,
@@ -174,9 +174,7 @@ def test_2D_m6_2k():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L4_2,
@@ -194,9 +192,7 @@ def test_2D_m6_1k_sFH():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L4_2,
@@ -214,9 +210,7 @@ def test_2D_m6_2k_sFH():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L4_2,
@@ -234,9 +228,7 @@ def test_3D_m6_1k():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L4_2,
@@ -254,9 +246,7 @@ def test_3D_m6_2k():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L4_2,
@@ -274,9 +264,7 @@ def test_3D_m6_1k_sFH():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L4_2,
@@ -294,9 +282,7 @@ def test_3D_m6_2k_sFH():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L4_2,
@@ -315,9 +301,7 @@ def test_2D_m4_1k():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L2_1,
@@ -325,9 +309,7 @@ def test_2D_m4_1k():
                               Splitting: 'o2',
                               Precision: PARMES_REAL}
                         )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [33, 33],
-                                      scal: [33, 33]},
+    advec_py = Advection(velo, scal, discretization=d2d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L2_1,
@@ -344,9 +326,7 @@ def test_2D_m4_2k():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L2_1,
@@ -355,9 +335,7 @@ def test_2D_m4_2k():
                               Precision: PARMES_REAL
                               }
                         )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [33, 33],
-                                      scal: [33, 33]},
+    advec_py = Advection(velo, scal, discretization=d2d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L2_1,
@@ -374,9 +352,7 @@ def test_2D_m4_1k_sFH():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L2_1,
@@ -384,9 +360,7 @@ def test_2D_m4_1k_sFH():
                               Splitting: 'o2_FullHalf',
                               Precision: PARMES_REAL},
                         )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [33, 33],
-                                      scal: [33, 33]},
+    advec_py = Advection(velo, scal, discretization=d2d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L2_1,
@@ -403,18 +377,14 @@ def test_2D_m4_2k_sFH():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L2_1,
                               Support: 'gpu_2k',
                               Splitting: 'o2_FullHalf'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [33, 33],
-                                      scal: [33, 33]},
+    advec_py = Advection(velo, scal, discretization=d2d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L2_1,
@@ -431,18 +401,14 @@ def test_3D_m4_1k():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L2_1,
                               Support: 'gpu_1k',
                               Splitting: 'o2'}
                         )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [17, 17, 17],
-                                      scal: [17, 17, 17]},
+    advec_py = Advection(velo, scal, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L2_1,
@@ -459,18 +425,14 @@ def test_3D_m4_2k():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L2_1,
                               Support: 'gpu_2k',
                               Splitting: 'o2'}
                             )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [17, 17, 17],
-                                      scal: [17, 17, 17]},
+    advec_py = Advection(velo, scal, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L2_1,
@@ -487,17 +449,13 @@ def test_3D_m4_1k_sFH():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L2_1,
                               Support: 'gpu_1k',
                               Splitting: 'o2_FullHalf'})
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [17, 17, 17],
-                                      scal: [17, 17, 17]},
+    advec_py = Advection(velo, scal, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L2_1,
@@ -514,18 +472,14 @@ def test_3D_m4_2k_sFH():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L2_1,
                               Support: 'gpu_2k',
                               Splitting: 'o2_FullHalf'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [17, 17, 17],
-                                      scal: [17, 17, 17]},
+    advec_py = Advection(velo, scal, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L2_1,
@@ -543,18 +497,14 @@ def test_2D_m8_1k():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: M8Prime,
                               Support: 'gpu_1k',
                               Splitting: 'o2'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [33, 33],
-                                      scal: [33, 33]},
+    advec_py = Advection(velo, scal, discretization=d2d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: M8Prime,
@@ -571,18 +521,14 @@ def test_2D_m8_2k():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: M8Prime,
                               Support: 'gpu_2k',
                               Splitting: 'o2'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [33, 33],
-                                      scal: [33, 33]},
+    advec_py = Advection(velo, scal, discretization=d2d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: M8Prime,
@@ -599,18 +545,14 @@ def test_2D_m8_1k_sFH():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: M8Prime,
                               Support: 'gpu_1k',
                               Splitting: 'o2_FullHalf'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [33, 33],
-                                      scal: [33, 33]},
+    advec_py = Advection(velo, scal, discretization=d2d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: M8Prime,
@@ -627,18 +569,14 @@ def test_2D_m8_2k_sFH():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: M8Prime,
                               Support: 'gpu_2k',
                               Splitting: 'o2_FullHalf'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [33, 33],
-                                      scal: [33, 33]},
+    advec_py = Advection(velo, scal, discretization=d2d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: M8Prime,
@@ -655,18 +593,14 @@ def test_3D_m8_1k():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: M8Prime,
                               Support: 'gpu_1k',
                               Splitting: 'o2'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [17, 17, 17],
-                                      scal: [17, 17, 17]},
+    advec_py = Advection(velo, scal, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: M8Prime,
@@ -683,18 +617,14 @@ def test_3D_m8_2k():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: M8Prime,
                               Support: 'gpu_2k',
                               Splitting: 'o2'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [17, 17, 17],
-                                      scal: [17, 17, 17]},
+    advec_py = Advection(velo, scal, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: M8Prime,
@@ -711,18 +641,14 @@ def test_3D_m8_1k_sFH():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: M8Prime,
                               Support: 'gpu_1k',
                               Splitting: 'o2_FullHalf'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [17, 17, 17],
-                                      scal: [17, 17, 17]},
+    advec_py = Advection(velo, scal, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: M8Prime,
@@ -739,18 +665,14 @@ def test_3D_m8_2k_sFH():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: M8Prime,
                               Support: 'gpu_2k',
                               Splitting: 'o2_FullHalf'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [17, 17, 17],
-                                      scal: [17, 17, 17]},
+    advec_py = Advection(velo, scal, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: M8Prime,
@@ -767,9 +689,7 @@ def test_2D_l6_2k():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L6_3,
@@ -786,9 +706,7 @@ def test_2D_l6_1k_sFH():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L6_3,
@@ -805,9 +723,7 @@ def test_2D_l6_2k_sFH():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L6_3,
@@ -824,9 +740,7 @@ def test_3D_l6_1k():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L6_3,
@@ -843,9 +757,7 @@ def test_3D_l6_2k():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L6_3,
@@ -862,9 +774,7 @@ def test_3D_l6_1k_sFH():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L6_3,
@@ -881,9 +791,7 @@ def test_3D_l6_2k_sFH():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L6_3,
@@ -894,22 +802,17 @@ def test_3D_l6_2k_sFH():
 
 
 def test_rectangular_domain2D():
-    box = Box(2, length=[1., 1.], origin=[0., 0.])
+    box = Box(length=[1., 1.], origin=[0., 0.])
     scal = Field(domain=box, name='Scalar')
     velo = Field(domain=box, name='Velocity', isVector=True)
-
-    advec = Advection(velo, scal,
-                      resolutions={velo: [65, 33],
-                                   scal: [65, 33]},
+    advec = Advection(velo, scal, discretization=Discretization([65, 33]),
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L4_2,
                               Support: 'gpu_1k',
                               Splitting: 'o2'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [65, 33],
-                                      scal: [65, 33]},
+    advec_py = Advection(velo, scal, discretization=Discretization([65, 33]),
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L4_2,
@@ -918,8 +821,8 @@ def test_rectangular_domain2D():
                          )
     advec.discretize()
     advec_py.discretize()
-    advec.setUp()
-    advec_py.setUp()
+    advec.setup()
+    advec_py.setup()
 
     scal_d = scal.discreteFields.values()[0]
     velo_d = velo.discreteFields.values()[0]
@@ -945,22 +848,18 @@ def test_rectangular_domain2D():
 
 
 def test_rectangular_domain3D():
-    box = Box(3, length=[1., 1., 1.], origin=[0., 0., 0.])
+    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
     scal = Field(domain=box, name='Scalar')
     velo = Field(domain=box, name='Velocity', isVector=True)
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [65, 33, 17],
-                                   scal: [65, 33, 17]},
+    advec = Advection(velo, scal, discretization=Discretization([65, 33, 17]),
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L4_2,
                               Support: 'gpu_1k',
                               Splitting: 'o2'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [65, 33, 17],
-                                      scal: [65, 33, 17]},
+    advec_py = Advection(velo, scal, discretization=Discretization([65, 33, 17]),
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L4_2,
@@ -969,8 +868,8 @@ def test_rectangular_domain3D():
                          )
     advec.discretize()
     advec_py.discretize()
-    advec.setUp()
-    advec_py.setUp()
+    advec.setup()
+    advec_py.setup()
 
     scal_d = scal.discreteFields.values()[0]
     velo_d = velo.discreteFields.values()[0]
@@ -998,22 +897,18 @@ def test_rectangular_domain3D():
 
 
 def test_2D_vector():
-    box = Box(2, length=[1., 1.], origin=[0., 0.])
+    box = Box(length=[1., 1.], origin=[0., 0.])
     scal = Field(domain=box, name='Scalar', isVector=True)
     velo = Field(domain=box, name='Velocity', isVector=True)
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [129, 129],
-                                   scal: [129, 129]},
+    advec = Advection(velo, scal, discretization=Discretization([129, 129]),
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L4_2,
                               Support: 'gpu_1k',
                               Splitting: 'o2'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [129, 129],
-                                      scal: [129, 129]},
+    advec_py = Advection(velo, scal, discretization=Discretization([129, 129]),
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L4_2,
@@ -1022,8 +917,8 @@ def test_2D_vector():
                          )
     advec.discretize()
     advec_py.discretize()
-    advec.setUp()
-    advec_py.setUp()
+    advec.setup()
+    advec_py.setup()
 
     scal_d = scal.discreteFields.values()[0]
     velo_d = velo.discreteFields.values()[0]
@@ -1055,22 +950,18 @@ def test_2D_vector():
 
 
 def test_3D_vector():
-    box = Box(3, length=[1., 1., 1.], origin=[0., 0., 0.])
+    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
     scal = Field(domain=box, name='Scalar', isVector=True)
     velo = Field(domain=box, name='Velocity', isVector=True)
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L4_2,
                               Support: 'gpu_1k',
                               Splitting: 'o2'}
                         )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [17, 17, 17],
-                                      scal: [17, 17, 17]},
+    advec_py = Advection(velo, scal, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L4_2,
@@ -1079,8 +970,8 @@ def test_3D_vector():
                          )
     advec.discretize()
     advec_py.discretize()
-    advec.setUp()
-    advec_py.setUp()
+    advec.setup()
+    advec_py.setup()
 
     scal_d = scal.discreteFields.values()[0]
     velo_d = velo.discreteFields.values()[0]
diff --git a/HySoP/hysop/gpu/tests/test_advection_randomVelocity.py b/HySoP/hysop/gpu/tests/test_advection_randomVelocity.py
index f457b40e44d12b9cbfba9c585ac1ce6e26b100ca..22e2071c6e0a20c0b9971776f2f44faeae3c5e29 100644
--- a/HySoP/hysop/gpu/tests/test_advection_randomVelocity.py
+++ b/HySoP/hysop/gpu/tests/test_advection_randomVelocity.py
@@ -12,17 +12,18 @@ from parmepy.methods_keys import TimeIntegrator, Interpolation, Remesh, \
 from parmepy.numerics.integrators.runge_kutta2 import RK2
 from parmepy.numerics.interpolation import Linear
 from parmepy.numerics.remeshing import L2_1, L4_2, M8Prime
+from parmepy.tools.parameters import Discretization
 
 
 def setup_2D():
-    box = Box(2, length=[1., 1.], origin=[0., 0.])
+    box = Box(length=[1., 1.], origin=[0., 0.])
     scal = Field(domain=box, name='Scalar')
     velo = Field(domain=box, name='Velocity', isVector=True)
     return scal, velo
 
 
 def setup_3D():
-    box = Box(3, length=[1., 1., 1.], origin=[0., 0., 0.])
+    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
     scal = Field(domain=box, name='Scalar')
     velo = Field(domain=box, name='Velocity', isVector=True)
     return scal, velo
@@ -31,53 +32,63 @@ def setup_3D():
 def assertion_2D_withPython(scal, velo, advec, advec_py):
     advec.discretize()
     advec_py.discretize()
-    advec.setUp()
-    advec_py.setUp()
+    advec.setup()
+    advec_py.setup()
 
     scal_d = scal.discreteFields.values()[0]
     velo_d = velo.discreteFields.values()[0]
     scal_d.data[0][...] = np.asarray(
         np.random.random(scal_d.data[0].shape),
         dtype=PARMES_REAL, order=ORDER)
-    velo_d.data[0][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[0])
-    velo_d.data[1][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[1])
+    velo_d.data[0][...] = np.ones(scal_d.data[0].shape,
+                                  dtype=PARMES_REAL, order=ORDER)
+    # velo_d.data[0][...] = np.asarray(
+    #     np.random.random(scal_d.data[0].shape),
+    #     dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[0])
+    velo_d.data[1][...] = np.ones(scal_d.data[0].shape,
+                                  dtype=PARMES_REAL, order=ORDER)
+    # velo_d.data[1][...] = np.asarray(
+    #     np.random.random(scal_d.data[0].shape),
+    #     dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[1])
     scal_d.toDevice()
     velo_d.toDevice()
 
-    advec_py.apply(Simulation(tinit=0., tend=0.001, nbIter=1))
-    advec.apply(Simulation(tinit=0., tend=0.001, nbIter=1))
+    advec_py.apply(Simulation(tinit=0., tend=0.1, nbIter=1))
+    advec.apply(Simulation(tinit=0., tend=0.1, nbIter=1))
 
     py_res = scal_d.data[0].copy()
     scal_d.toHost()
 
     advec.finalize()
-    assert np.allclose(py_res, scal_d.data[0], rtol=5e-02, atol=5e-05)
+    assert np.allclose(py_res, scal_d.data[0], rtol=1e-04, atol=1e-07)
 
 
 def assertion_3D_withPython(scal, velo, advec, advec_py):
     advec.discretize()
     advec_py.discretize()
-    advec.setUp()
-    advec_py.setUp()
+    advec.setup()
+    advec_py.setup()
 
     scal_d = scal.discreteFields.values()[0]
     velo_d = velo.discreteFields.values()[0]
     scal_d.data[0][...] = np.asarray(
         np.random.random(scal_d.data[0].shape),
         dtype=PARMES_REAL, order=ORDER)
-    velo_d.data[0][...] = np.zeros_like(
-        scal_d.data[0],
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[0])
-    velo_d.data[1][...] = np.zeros_like(
-        scal_d.data[0],
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[1])
-    velo_d.data[2][...] = np.zeros_like(
-        scal_d.data[0],
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[2])
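+    # Use a constant unit velocity; the original (zero) velocity is
+    # kept commented out below.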
+    velo_d.data[0][...] = np.ones(scal_d.data[0].shape,
+                                  dtype=PARMES_REAL, order=ORDER)
+    # velo_d.data[0][...] = np.zeros_like(
+    #     scal_d.data[0],
+    #     dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[0])
+    velo_d.data[1][...] = np.ones(scal_d.data[0].shape,
+                                  dtype=PARMES_REAL, order=ORDER)
+    # velo_d.data[1][...] = np.zeros_like(
+    #     scal_d.data[0],
+    #     dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[1])
+    velo_d.data[2][...] = np.ones(scal_d.data[0].shape,
+                                  dtype=PARMES_REAL, order=ORDER)
+    # velo_d.data[2][...] = np.zeros_like(
+    #     scal_d.data[0],
+    #     dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[2])
     scal_d.toDevice()
     velo_d.toDevice()
 
@@ -88,8 +99,9 @@ def assertion_3D_withPython(scal, velo, advec, advec_py):
     scal_d.toHost()
 
     advec.finalize()
-    assert np.allclose(py_res, scal_d.data[0], rtol=5e-02, atol=5e-05)
+    assert np.allclose(py_res, scal_d.data[0], rtol=1e-04, atol=1e-07)
 
+# Default discretizations used by most of the tests below:
+# 33x33 grid points in 2D, 17x17x17 in 3D.
+d2d = Discretization([33, 33])
+d3d = Discretization([17, 17, 17])
 
 # M6 testing
 def test_2D_m6_1k():
@@ -99,18 +111,14 @@ def test_2D_m6_1k():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17],
-                                   scal: [17, 17]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L2_1,
                               Support: 'gpu_2k',
                               Splitting: 'o2'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [17, 17],
-                                      scal: [17, 17]},
+    advec_py = Advection(velo, scal, discretization=d2d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L2_1,
@@ -127,18 +135,14 @@ def test_2D_m6_2k():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L4_2,
                               Support: 'gpu_2k',
                               Splitting: 'o2'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [33, 33],
-                                      scal: [33, 33]},
+    advec_py = Advection(velo, scal, discretization=d2d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L4_2,
@@ -155,18 +159,14 @@ def test_2D_m6_1k_sFH():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L4_2,
                               Support: 'gpu_2k',
                               Splitting: 'o2_FullHalf'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [33, 33],
-                                      scal: [33, 33]},
+    advec_py = Advection(velo, scal, discretization=d2d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L4_2,
@@ -183,18 +183,14 @@ def test_2D_m6_2k_sFH():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L4_2,
                               Support: 'gpu_2k',
                               Splitting: 'o2_FullHalf'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [33, 33],
-                                      scal: [33, 33]},
+    advec_py = Advection(velo, scal, discretization=d2d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L4_2,
@@ -211,18 +207,14 @@ def test_3D_m6_1k():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L4_2,
                               Support: 'gpu_1k',
                               Splitting: 'o2'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [17, 17, 17],
-                                      scal: [17, 17, 17]},
+    advec_py = Advection(velo, scal, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L4_2,
@@ -239,18 +231,14 @@ def test_3D_m6_2k():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L4_2,
                               Support: 'gpu_2k',
                               Splitting: 'o2'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [17, 17, 17],
-                                      scal: [17, 17, 17]},
+    advec_py = Advection(velo, scal, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L4_2,
@@ -267,18 +255,14 @@ def test_3D_m6_1k_sFH():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L4_2,
                               Support: 'gpu_1k',
                               Splitting: 'o2_FullHalf'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [17, 17, 17],
-                                      scal: [17, 17, 17]},
+    advec_py = Advection(velo, scal, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L4_2,
@@ -295,18 +279,14 @@ def test_3D_m6_2k_sFH():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L4_2,
                               Support: 'gpu_2k',
                               Splitting: 'o2_FullHalf'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [17, 17, 17],
-                                      scal: [17, 17, 17]},
+    advec_py = Advection(velo, scal, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L4_2,
@@ -324,18 +304,14 @@ def test_2D_m4_1k():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L2_1,
                               Support: 'gpu_1k',
                               Splitting: 'o2'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [33, 33],
-                                      scal: [33, 33]},
+    advec_py = Advection(velo, scal, discretization=d2d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L2_1,
@@ -352,18 +328,14 @@ def test_2D_m4_2k():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L2_1,
                               Support: 'gpu_2k',
                               Splitting: 'o2'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [33, 33],
-                                      scal: [33, 33]},
+    advec_py = Advection(velo, scal, discretization=d2d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L2_1,
@@ -380,18 +352,14 @@ def test_2D_m4_1k_sFH():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L2_1,
                               Support: 'gpu_1k',
                               Splitting: 'o2_FullHalf'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [33, 33],
-                                      scal: [33, 33]},
+    advec_py = Advection(velo, scal, discretization=d2d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L2_1,
@@ -408,18 +376,14 @@ def test_2D_m4_2k_sFH():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L2_1,
                               Support: 'gpu_2k',
                               Splitting: 'o2_FullHalf'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [33, 33],
-                                      scal: [33, 33]},
+    advec_py = Advection(velo, scal, discretization=d2d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L2_1,
@@ -436,22 +400,18 @@ def test_3D_m4_1k():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L2_1,
                               Support: 'gpu_1k',
                               Splitting: 'o2'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [17, 17, 17],
-                                      scal: [17, 17, 17]},
+    advec_py = Advection(velo, scal, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L2_1,
-                                 Support: 'gpu_1k',
+                                 Support: '',
                                  Splitting: 'o2'},
                          )
     assertion_3D_withPython(scal, velo, advec, advec_py)
@@ -464,18 +424,14 @@ def test_3D_m4_2k():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L2_1,
                               Support: 'gpu_2k',
                               Splitting: 'o2'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [17, 17, 17],
-                                      scal: [17, 17, 17]},
+    advec_py = Advection(velo, scal, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L2_1,
@@ -492,18 +448,14 @@ def test_3D_m4_1k_sFH():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L2_1,
                               Support: 'gpu_1k',
                               Splitting: 'o2_FullHalf'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [17, 17, 17],
-                                      scal: [17, 17, 17]},
+    advec_py = Advection(velo, scal, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L2_1,
@@ -520,18 +472,14 @@ def test_3D_m4_2k_sFH():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L2_1,
                               Support: 'gpu_2k',
                               Splitting: 'o2_FullHalf'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [17, 17, 17],
-                                      scal: [17, 17, 17]},
+    advec_py = Advection(velo, scal, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L2_1,
@@ -549,18 +497,14 @@ def test_2D_m8_1k():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: M8Prime,
                               Support: 'gpu_1k',
                               Splitting: 'o2'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [33, 33],
-                                      scal: [33, 33]},
+    advec_py = Advection(velo, scal, discretization=d2d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: M8Prime,
@@ -577,18 +521,14 @@ def test_2D_m8_2k():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: M8Prime,
                               Support: 'gpu_2k',
                               Splitting: 'o2'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [33, 33],
-                                      scal: [33, 33]},
+    advec_py = Advection(velo, scal, discretization=d2d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: M8Prime,
@@ -605,18 +545,14 @@ def test_2D_m8_1k_sFH():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: M8Prime,
                               Support: 'gpu_1k',
                               Splitting: 'o2_FullHalf'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [33, 33],
-                                      scal: [33, 33]},
+    advec_py = Advection(velo, scal, discretization=d2d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: M8Prime,
@@ -633,18 +569,14 @@ def test_2D_m8_2k_sFH():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [33, 33],
-                                   scal: [33, 33]},
+    advec = Advection(velo, scal, discretization=d2d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: M8Prime,
                               Support: 'gpu_2k',
                               Splitting: 'o2_FullHalf'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [33, 33],
-                                      scal: [33, 33]},
+    advec_py = Advection(velo, scal, discretization=d2d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: M8Prime,
@@ -653,6 +585,7 @@ def test_2D_m8_2k_sFH():
                          )
     assertion_2D_withPython(scal, velo, advec, advec_py)
 
 
 def test_3D_m8_1k():
     """
@@ -661,18 +594,14 @@ def test_3D_m8_1k():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: M8Prime,
                               Support: 'gpu_1k',
                               Splitting: 'o2'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [17, 17, 17],
-                                      scal: [17, 17, 17]},
+    advec_py = Advection(velo, scal, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: M8Prime,
@@ -689,18 +618,14 @@ def test_3D_m8_2k():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: M8Prime,
                               Support: 'gpu_2k',
                               Splitting: 'o2'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [17, 17, 17],
-                                      scal: [17, 17, 17]},
+    advec_py = Advection(velo, scal, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: M8Prime,
@@ -717,18 +642,14 @@ def test_3D_m8_1k_sFH():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: M8Prime,
                               Support: 'gpu_1k',
                               Splitting: 'o2_FullHalf'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [17, 17, 17],
-                                      scal: [17, 17, 17]},
+    advec_py = Advection(velo, scal, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: M8Prime,
@@ -745,18 +666,14 @@ def test_3D_m8_2k_sFH():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: M8Prime,
                               Support: 'gpu_2k',
                               Splitting: 'o2_FullHalf'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [17, 17, 17],
-                                      scal: [17, 17, 17]},
+    advec_py = Advection(velo, scal, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: M8Prime,
@@ -767,22 +684,18 @@ def test_3D_m8_2k_sFH():
 
 
 def test_rectangular_domain2D():
-    box = Box(2, length=[1., 1.], origin=[0., 0.])
+    box = Box(length=[1., 1.], origin=[0., 0.])
     scal = Field(domain=box, name='Scalar')
     velo = Field(domain=box, name='Velocity', isVector=True)
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [65, 33],
-                                   scal: [65, 33]},
+    advec = Advection(velo, scal, discretization=Discretization([65, 33]),
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L4_2,
                               Support: 'gpu_1k',
                               Splitting: 'o2'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [65, 33],
-                                      scal: [65, 33]},
+    advec_py = Advection(velo, scal, discretization=Discretization([65, 33]),
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L4_2,
@@ -791,19 +704,23 @@ def test_rectangular_domain2D():
                          )
     advec.discretize()
     advec_py.discretize()
-    advec.setUp()
-    advec_py.setUp()
+    advec.setup()
+    advec_py.setup()
 
     scal_d = scal.discreteFields.values()[0]
     velo_d = velo.discreteFields.values()[0]
     scal_d.data[0][...] = np.asarray(np.random.random(scal_d.data[0].shape),
                                      dtype=PARMES_REAL, order=ORDER)
-    velo_d.data[0][...] = np.asarray(
-        np.random.random(velo_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[0])
-    velo_d.data[1][...] = np.asarray(
-        np.random.random(velo_d.data[1].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[1])
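+    # Use a constant unit velocity; the original random velocity is
+    # kept commented out below.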
+    velo_d.data[0][...] = np.ones(scal_d.data[0].shape,
+                                  dtype=PARMES_REAL, order=ORDER)
+    # velo_d.data[0][...] = np.asarray(
+    #     np.random.random(velo_d.data[0].shape),
+    #     dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[0])
+    velo_d.data[1][...] = np.ones(scal_d.data[0].shape,
+                                  dtype=PARMES_REAL, order=ORDER)
+    # velo_d.data[1][...] = np.asarray(
+    #     np.random.random(velo_d.data[1].shape),
+    #     dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[1])
 
     scal_d.toDevice()
     velo_d.toDevice()
@@ -814,27 +731,23 @@ def test_rectangular_domain2D():
     py_res = scal_d.data[0].copy()
     scal_d.toHost()
 
-    assert np.allclose(py_res, scal_d.data[0], rtol=5e-05, atol=5e-05)
+    assert np.allclose(py_res, scal_d.data[0], rtol=1e-04, atol=1e-07)
     advec.finalize()
 
 
 def test_rectangular_domain3D():
-    box = Box(3, length=[1., 1., 1.], origin=[0., 0., 0.])
+    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
     scal = Field(domain=box, name='Scalar')
     velo = Field(domain=box, name='Velocity', isVector=True)
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [65, 33, 17],
-                                   scal: [65, 33, 17]},
+    advec = Advection(velo, scal, discretization=Discretization([65, 33, 17]),
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L4_2,
                               Support: 'gpu_1k',
                               Splitting: 'o2'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [65, 33, 17],
-                                      scal: [65, 33, 17]},
+    advec_py = Advection(velo, scal, discretization=Discretization([65, 33, 17]),
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L4_2,
@@ -843,52 +756,54 @@ def test_rectangular_domain3D():
                          )
     advec.discretize()
     advec_py.discretize()
-    advec.setUp()
-    advec_py.setUp()
+    advec.setup()
+    advec_py.setup()
 
     scal_d = scal.discreteFields.values()[0]
     velo_d = velo.discreteFields.values()[0]
     scal_d.data[0][...] = np.asarray(np.random.random(scal_d.data[0].shape),
                                      dtype=PARMES_REAL, order=ORDER)
-    velo_d.data[0][...] = np.asarray(
-        np.random.random(velo_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[0])
-    velo_d.data[1][...] = np.asarray(
-        np.random.random(velo_d.data[1].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[1])
-    velo_d.data[2][...] = np.asarray(
-        np.random.random(velo_d.data[2].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[2])
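+    # Use a constant unit velocity; the original random velocity is
+    # kept commented out below.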
+    velo_d.data[0][...] = np.ones(scal_d.data[0].shape,
+                                  dtype=PARMES_REAL, order=ORDER)
+    # velo_d.data[0][...] = np.asarray(
+    #     np.random.random(velo_d.data[0].shape),
+    #     dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[0])
+    velo_d.data[1][...] = np.ones(scal_d.data[0].shape,
+                                  dtype=PARMES_REAL, order=ORDER)
+    # velo_d.data[1][...] = np.asarray(
+    #     np.random.random(velo_d.data[1].shape),
+    #     dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[1])
+    velo_d.data[2][...] = np.ones(scal_d.data[0].shape,
+                                  dtype=PARMES_REAL, order=ORDER)
+    # velo_d.data[2][...] = np.asarray(
+    #     np.random.random(velo_d.data[2].shape),
+    #     dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[2])
     scal_d.toDevice()
     velo_d.toDevice()
 
-    advec_py.apply(Simulation(tinit=0., tend=0.01, nbIter=1))
-    advec.apply(Simulation(tinit=0., tend=0.01, nbIter=1))
+    advec_py.apply(Simulation(tinit=0., tend=0.1, nbIter=1))
+    advec.apply(Simulation(tinit=0., tend=0.1, nbIter=1))
 
     py_res = scal_d.data[0].copy()
     scal_d.toHost()
 
-    assert np.allclose(py_res, scal_d.data[0], rtol=5e-02, atol=5e-05)
+    assert np.allclose(py_res, scal_d.data[0], rtol=1e-04, atol=1e-07)
     advec.finalize()
 
 
 def test_vector_2D():
-    box = Box(2, length=[1., 1.], origin=[0., 0.])
+    box = Box(length=[1., 1.], origin=[0., 0.])
     scal = Field(domain=box, name='Scalar', isVector=True)
     velo = Field(domain=box, name='Velocity', isVector=True)
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [129, 129],
-                                   scal: [129, 129]},
+    advec = Advection(velo, scal, discretization=Discretization([129, 129]),
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L4_2,
                               Support: 'gpu_1k',
                               Splitting: 'o2'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [129, 129],
-                                      scal: [129, 129]},
+    advec_py = Advection(velo, scal, discretization=Discretization([129, 129]),
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L4_2,
@@ -897,55 +812,54 @@ def test_vector_2D():
                          )
     advec.discretize()
     advec_py.discretize()
-    advec.setUp()
-    advec_py.setUp()
+    advec.setup()
+    advec_py.setup()
 
     scal_d = scal.discreteFields.values()[0]
     velo_d = velo.discreteFields.values()[0]
     scal_d.data[0][...] = np.asarray(np.random.random(scal_d.data[0].shape),
                                      dtype=PARMES_REAL, order=ORDER)
-    scal_d.data[1][...] = scal_d.data[0]
-    # np.asarray(np.random.random(scal_d.data[1].shape),
-    #     dtype=PARMES_REAL, order=ORDER)
-    velo_d.data[0][...] = np.asarray(
-        np.random.random(velo_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[0])
-    velo_d.data[1][...] = np.asarray(
-        np.random.random(velo_d.data[1].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[1])
+    scal_d.data[1][...] = np.asarray(np.random.random(scal_d.data[1].shape),
+                                     dtype=PARMES_REAL, order=ORDER)
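+    # Use a constant unit velocity; the original random velocity is
+    # kept commented out below.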
+    velo_d.data[0][...] = np.ones(scal_d.data[0].shape,
+                                  dtype=PARMES_REAL, order=ORDER)
+    # velo_d.data[0][...] = np.asarray(
+    #     np.random.random(velo_d.data[0].shape),
+    #     dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[0])
+    velo_d.data[1][...] = np.ones(scal_d.data[0].shape,
+                                  dtype=PARMES_REAL, order=ORDER)
+    # velo_d.data[1][...] = np.asarray(
+    #     np.random.random(velo_d.data[1].shape),
+    #     dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[1])
     scal_d.toDevice()
     velo_d.toDevice()
 
     print np.max(scal_d.data[0] - scal_d.data[1])
-    advec_py.apply(Simulation(tinit=0., tend=0.01, nbIter=1))
-    advec.apply(Simulation(tinit=0., tend=0.01, nbIter=1))
+    advec_py.apply(Simulation(tinit=0., tend=0.1, nbIter=1))
+    advec.apply(Simulation(tinit=0., tend=0.1, nbIter=1))
 
     py_res_X = scal_d.data[0].copy()
     py_res_Y = scal_d.data[1].copy()
     scal_d.toHost()
 
-    assert np.allclose(py_res_X, scal_d.data[0], rtol=5e-02, atol=5e-05)
-    assert np.allclose(py_res_Y, scal_d.data[1], rtol=5e-02, atol=5e-05)
+    assert np.allclose(py_res_X, scal_d.data[0], rtol=1e-04, atol=1e-07)
+    assert np.allclose(py_res_Y, scal_d.data[1], rtol=1e-04, atol=1e-07)
     advec.finalize()
 
 
 def test_vector_3D():
-    box = Box(3, length=[1., 1., 1.], origin=[0., 0., 0.])
+    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
     scal = Field(domain=box, name='Scalar', isVector=True)
     velo = Field(domain=box, name='Velocity', isVector=True)
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d,
                       method={TimeIntegrator: RK2,
                               Interpolation: Linear,
                               Remesh: L4_2,
                               Support: 'gpu_1k',
                               Splitting: 'o2'}
                       )
-    advec_py = Advection(velo, scal,
-                         resolutions={velo: [17, 17, 17],
-                                      scal: [17, 17, 17]},
+    advec_py = Advection(velo, scal, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L4_2,
@@ -954,8 +868,8 @@ def test_vector_3D():
                          )
     advec.discretize()
     advec_py.discretize()
-    advec.setUp()
-    advec_py.setUp()
+    advec.setup()
+    advec_py.setup()
 
     scal_d = scal.discreteFields.values()[0]
     velo_d = velo.discreteFields.values()[0]
@@ -965,27 +879,33 @@ def test_vector_3D():
                                      dtype=PARMES_REAL, order=ORDER)
     scal_d.data[2][...] = np.asarray(np.random.random(scal_d.data[0].shape),
                                      dtype=PARMES_REAL, order=ORDER)
-    velo_d.data[0][...] = np.asarray(
-        np.random.random(velo_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[0])
-    velo_d.data[1][...] = np.asarray(
-        np.random.random(velo_d.data[1].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[1])
-    velo_d.data[2][...] = np.asarray(
-        np.random.random(velo_d.data[2].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[2])
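+    # Use a constant unit velocity; the original random velocity is
+    # kept commented out below.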
+    velo_d.data[0][...] = np.ones(scal_d.data[0].shape,
+                                  dtype=PARMES_REAL, order=ORDER)
+    # velo_d.data[0][...] = np.asarray(
+    #     np.random.random(velo_d.data[0].shape),
+    #     dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[0])
+    velo_d.data[1][...] = np.ones(scal_d.data[0].shape,
+                                  dtype=PARMES_REAL, order=ORDER)
+    # velo_d.data[1][...] = np.asarray(
+    #     np.random.random(velo_d.data[1].shape),
+    #     dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[1])
+    velo_d.data[2][...] = np.ones(scal_d.data[0].shape,
+                                  dtype=PARMES_REAL, order=ORDER)
+    # velo_d.data[2][...] = np.asarray(
+    #     np.random.random(velo_d.data[2].shape),
+    #     dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[2])
     scal_d.toDevice()
     velo_d.toDevice()
 
-    advec_py.apply(Simulation(tinit=0., tend=0.01, nbIter=1))
-    advec.apply(Simulation(tinit=0., tend=0.01, nbIter=1))
+    advec_py.apply(Simulation(tinit=0., tend=0.1, nbIter=1))
+    advec.apply(Simulation(tinit=0., tend=0.1, nbIter=1))
 
     py_res_X = scal_d.data[0].copy()
     py_res_Y = scal_d.data[1].copy()
     py_res_Z = scal_d.data[2].copy()
     scal_d.toHost()
 
-    assert np.allclose(py_res_X, scal_d.data[0], rtol=5e-02, atol=5e-05)
-    assert np.allclose(py_res_Y, scal_d.data[1], rtol=5e-02, atol=5e-05)
-    assert np.allclose(py_res_Z, scal_d.data[2], rtol=5e-02, atol=5e-05)
+    assert np.allclose(py_res_X, scal_d.data[0], rtol=1e-04, atol=1e-07)
+    assert np.allclose(py_res_Y, scal_d.data[1], rtol=1e-04, atol=1e-07)
+    assert np.allclose(py_res_Z, scal_d.data[2], rtol=1e-04, atol=1e-07)
     advec.finalize()
diff --git a/HySoP/hysop/mpi/__init__.py b/HySoP/hysop/mpi/__init__.py
index 828c54382374a3a00deb2a7a5769df8b16f920f4..b20988c36fe5d95c9fb6e1461c73f20626cb55ea 100644
--- a/HySoP/hysop/mpi/__init__.py
+++ b/HySoP/hysop/mpi/__init__.py
@@ -18,8 +18,7 @@ At the time we use mpi4py : http://mpi4py.scipy.org
 ## Everything concerning the chosen mpi implementation is hidden in main_var
 # Why? --> to avoid that things like mpi4py. ... spread everywhere in the
 # soft so to ease a change of this implementation (if needed).
-import main_var
-
+from parmepy.mpi import main_var
 ## A list of mpi variables that can be "seen" by user
 ##  MPI underlying implementation
 MPI = main_var.MPI
diff --git a/HySoP/hysop/mpi/__init__.pyc b/HySoP/hysop/mpi/__init__.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..01757e793aa58dbaffcb49041d31df4bd02b8fc6
Binary files /dev/null and b/HySoP/hysop/mpi/__init__.pyc differ
diff --git a/HySoP/hysop/mpi/__pycache__/topology.cpython-27-PYTEST.pyc b/HySoP/hysop/mpi/__pycache__/topology.cpython-27-PYTEST.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9c19c9210a54bfa125181a37c42d6911ddf6aa68
Binary files /dev/null and b/HySoP/hysop/mpi/__pycache__/topology.cpython-27-PYTEST.pyc differ
diff --git a/HySoP/hysop/mpi/bridge.py b/HySoP/hysop/mpi/bridge.py
index 7cc1be3d424121609147fe68db6213434bfe26ce..98c060ee7c41ac32fa2fa3b7013485542d4d4d59 100644
--- a/HySoP/hysop/mpi/bridge.py
+++ b/HySoP/hysop/mpi/bridge.py
@@ -1,410 +1,146 @@
 """
 @file bridge.py
 Tools to compute the intersection between
-two topologies.
+two parmes topologies.
 """
-from parmepy.constants import np, debug
-from parmepy.mpi.main_var import MPI
-import parmepy.tools.numpywrappers as npw
-from parmepy.mpi.topology import topotools
+from parmepy.mpi.topology import Cartesian, topotools
+from parmepy.tools.misc import utils
 
 
 class Bridge(object):
     """
-    A Bridge is the definition of what must be exchanged between two topologies
-    and how to do so.
-
-    For a bridge between topo1 and topo2, it will provide :
-    bridge.sendTo = [[rk, i, j, k, l, ...]]
-    bridge.recvFrom = [[rk, i, j, k, l, ...]]
-    where each line corresponds to a message that must be send
-    from current mpi process to process rk for sendTo or
-    receive by current mpi process from process rk for recvFrom.
-    The other columns give the numbers of the node of the mesh
-    that must be exchanged.
-    Warning : index numbers corresponds to local nodes numbers.
-
-    In sendTo/recvFrom :
-    * First column : dest/source of the message.
-    Warning : rank number is given in main_comm.
-    * second-third columns : first and last index of nodes in the first dir
-    * 4th-5th : first and last index of nodes in the second dir
-    * and so on depending on the size of the domain
-    associated to the topologies.
-
+    A Bridge defines the data exchanges needed to transfer values
+    from a source topology to a target topology: which grid points
+    the current process must send to or receive from every other
+    process, and the MPI derived types used to perform the transfer.
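+
+    Usage sketch (with hypothetical topologies topo1 and topo2,
+    assumed to be Cartesian topologies built on the same parent
+    communicator):
+
+        bridge = Bridge(source=topo1, target=topo2)
+        # MPI derived types for the receive side:
+        recv_types = bridge.recvTypes()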
     """
 
-    @debug
-    def __init__(self, topoFrom, topoTo, parentComm=None):
+    def __init__(self, source, target):
         """
-        Bridge constructor.
-        @param source topology
-        @param targeted topology
-        @param parent communicator, if None, use topoFrom.parent()
+        @param source : topology that owns the source mesh
+        @param target : topology of the targeted mesh
         """
-        ## Source topology
-        self.topoFrom = topoFrom
-        ## Targeted topology
-        self.topoTo = topoTo
-
-        # Both topologies must be uptodate
-        assert(topoFrom.isUpToDate)
-        assert(topoTo.isUpToDate)
-
-        ## recvFrom[rk] returns a list of indices that should be
-        ## received from process rk and copied into local field.
-        ## Usage :  mpi.recv(rk, ..., yourField[recvFrom[rk]],...)
-        self.recvFrom = {}
-
-        ## sendTo[rk] returns a list of indices that should be
-        ## sent from current process to process rk.
-        ## Usage :  mpi.send(rk, ..., yourField[sendTo[rk]],...)
-        self.sendTo = {}
-
-        ## List of local indices that should be copied from data of
-        ## topofrom to data of topoTo for the current process.
-        ## Usage:
-        ## field_topoto[ito] = field_topofrom[ifrom]
-        ## --> no mpi messages
-        self.ifrom = []
-        ## List of local indices that should be updated on the
-        ## current process for topoTo from values on the
-        ## same process for topoFrom.
-        ## Usage:
-        ## field_topoto[ito] = field_topofrom[ifrom]
-        ## --> no mpi messages
-        self.ito = []
-
-        self.hasLocalInter = True
-
-        ## Parent communicator
-        ## Todo : define some proper conditions for compatibility
-        ## between topoFrom, topoTo and parent:
-        ## - same size
-        ## - same domain
-        ## - common processus ...
-        ## At the time we check that both topo have
-        ## the same comm_origin.
-        if parentComm is None:
-            self._parentComm = self.topoFrom.parent()
-        else:
-            self._parentComm = parentComm
-        self._parentRank = self._parentComm.Get_rank()
-        self._parentSize = self._parentComm.Get_size()
-        if self.topoFrom == self.topoTo:
+        # -- All dictionaries below use rank numbers (in the parent
+        # communicator) as keys. --
+        # Dictionary of indices of grid points to be received on the target.
+        self._recv_indices = {}
+        # Dictionary of indices of grid points to be sent from the source.
+        self._send_indices = {}
+        # Dictionary of MPI derived types used for MPI receives
+        # (built lazily in recvTypes).
+        self._recv_types = None
+        # Dictionary of MPI derived types used for MPI sends.
+        self._send_types = None
+
+        # The communicator that will be used in this bridge.
+        self.comm = None
+        # Current rank in this communicator.
+        self._rank = None
+
+        self._source = source
+        self._target = target
+        self._check_topologies()
+        # Nothing to be done if source and target are the same topology.
+        if source == target:
             return
 
-        assert(self.topoTo.parent() is self._parentComm)
-
-        # Both topologies must have the
-        # same number of mpi processes.
-        assert topoFrom.size == topoTo.size
-
-        dom = topoFrom.domain
-        
-        # 1 - Collect global mesh indices for each topology
-        iglobFrom = topotools.collectGlobalIndices(topoFrom, self._parentComm)
-        iglobTo = topotools.collectGlobalIndices(topoTo, self._parentComm)
-        
-        # Connectivity :
-        sTo = np.zeros((0, 1 + dom.dimension * 2), dtype=np.int32)
-        indexFrom = []
-        for d in range(dom.dimension):
-            indexFrom.append(range(iglobFrom[self._parentRank, d * 2],
-                                   iglobFrom[self._parentRank, d * 2 + 1] + 1))
-
-        line = np.zeros((1 + 2 * dom.dimension), dtype=np.int32)
-        listRanks = [i for i in range(self._parentSize)
-                     if i != self._parentRank]
-
-        for i in listRanks:
-            line[0] = i
-            hasInter = True
-            for d in range(dom.dimension):
-                indexTo = range(iglobTo[i, d * 2], iglobTo[i, d * 2 + 1] + 1)
-                interRow = [k for k in indexFrom[d] if k in indexTo]
-                interRow.sort()
-                if interRow.__len__():
-                    line[d * 2 + 1] = interRow[0]
-                    line[d * 2 + 2] = interRow[-1]
-                else:
-                    hasInter = False
-                    break
-
-            if hasInter:
-                sTo = np.vstack((sTo, line))
-        
-        # Same operations but for current process
-        localFrom = np.zeros((2 * dom.dimension), dtype=np.int32)
-        for d in range(dom.dimension):
-            
-            indexTo = range(iglobTo[self._parentRank, d * 2],
-                            iglobTo[self._parentRank, d * 2 + 1] + 1)
-          
-            interRow = [k for k in indexFrom[d] if k in indexTo]
-            interRow.sort()
-            if interRow.__len__():
-                localFrom[d * 2] = interRow[0]
-                localFrom[d * 2 + 1] = interRow[-1]
-            else:
-                self.hasLocalInter = False
-                break
-
-        if self.hasLocalInter:
-            localTo = topoTo.toIndexLocal(localFrom)
-            localFrom = topoFrom.toIndexLocal(localFrom)
-            for d in range(dom.dimension):
-                self.ifrom.append(slice(localFrom[2 * d],
-                                        localFrom[2 * d + 1] + 1))
-                self.ito.append(slice(localTo[2 * d],
-                                      localTo[2 * d + 1] + 1))
-
-        # --- Compute globalConnectivity on process 0 and distribute it ---
-        # Global Connectivity : each line corresponds to a message :
-        # globalConnectivity[i,:] = [i, j, 0, 3, 2, 6]
-        # means that process i must send to process j its mesh
-        # from node 0 to 3 in the first direction and from node 2 to
-        # 6 in the second dir AND that process j must receive the same mesh
-        # from process i. Nodes number given in the global index set.
-        # Warning : process ranks are given in parentComm.
-        globalConnectivity = np.zeros((sTo.shape[0], 2 +
-                                       dom.dimension * 2), dtype=np.int32)
-
-        globalConnectivity[:, 1:] = sTo.copy(order='A')
-        globalConnectivity[:, 0] = self._parentRank
-
-        if self._parentRank == 0:
-            for i in range(1, self._parentSize):
-                temp = self._parentComm.recv(source=i)
-                globalConnectivity = np.vstack((globalConnectivity, temp))
-
+        self._build_send_recv_dict()
+
+    def _check_topologies(self):
+        # First check that source and target are compliant topologies.
+        msg = 'Bridge error, input source/target must be'
+        msg += ' Cartesian topologies.'
+        assert isinstance(self._source, Cartesian), msg
+        assert isinstance(self._target, Cartesian), msg
+
+        msg = 'Bridge error, both source/target topologies'
+        msg += ' must have the same parent communicator.'
+        assert topotools.compare_comm(self._source.parent(),
+                                      self._target.parent()), msg
+        # The assert above ensures that source and target hold the same
+        # group of processes in the same communication context.
+        self.comm = self._source.parent()
+        self._rank = self.comm.Get_rank()
+
+    def _build_send_recv_dict(self):
+        # Compute local intersections, i.e. find which grid points
+        # lie on both the source and the target mesh.
+
+        # Get global indices of the mesh on source for all mpi processes.
+        indices_source = topotools.gatherGlobalIndices(self._source)
+
+        # Get global indices of the mesh on target for all mpi processes.
+        indices_target = topotools.gatherGlobalIndices(self._target)
+        # From now on, indices_source[rk] gives the global indices (slices)
+        # of the grid points of the source mesh owned by process rk in the
+        # parent communicator, and likewise for indices_target.
+
+        # Compute the intersections of the source mesh with every mesh on
+        # the target ---> find which part of the local mesh must be sent
+        # to whom, which results in the self._send_indices dict.
+        # self._send_indices[i] = [slice(...), slice(...), slice(...)]
+        # means that the current process must send to process i the grid points
+        # defined by the slices above.
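+        # Example (hypothetical 1-D values): if the current process owns
+        # global points 0..15 on the source and process 1 owns points
+        # 8..31 on the target, self._send_indices[1] will describe
+        # points 8..15 (in local coordinates after the conversion below).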
+        current = indices_source[self._rank]
+        for rk in indices_target:
+            inter = utils.intersl(current, indices_target[rk])
+            if inter is not None:
+                self._send_indices[rk] = inter
+        # Back to local indices
+        convert = self._source.mesh.convert2local
+        self._send_indices = {rk: convert(self._send_indices[rk])
+                              for rk in self._send_indices}
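+        # (mesh.convert2local is assumed to translate the global slices
+        # above into slices relative to the local data array.)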
+
+        # Compute the intersections of the target mesh with every mesh on
+        # the source ---> find which part of the local mesh must receive
+        # data, and from whom, which results in the self._recv_indices dict.
+        # self._recv_indices[i] = [slice(...), slice(...), slice(...)]
+        # means that the current process must receive from process i
+        # the grid points defined by the slices above.
+        current = indices_target[self._rank]
+        for rk in indices_source:
+            inter = utils.intersl(current, indices_source[rk])
+            if inter is not None:
+                self._recv_indices[rk] = inter
+
+        convert = self._target.mesh.convert2local
+        self._recv_indices = {rk: convert(self._recv_indices[rk])
+                              for rk in self._recv_indices}
+
+    def hasLocalInter(self):
+        """True if the current process keeps some grid points,
+        i.e. it 'sends' data to itself."""
+        return self._rank in self._send_indices
+
+    def localSourceInd(self):
+        """Local indices (slices), on the source mesh, of the points
+        kept by the current process."""
+        if self._rank in self._send_indices:
+            return self._send_indices[self._rank]
         else:
-            self._parentComm.ssend(globalConnectivity, dest=0)
-
-        ## recvFrom[:,0] rank of the sending process in parentComm
-        ## recvFrom[:,1:] local indices (start,end, ...)
-        ## of the points to be receive for each direction.
-        ## Example (in 2D)
-        ## recvFrom = [[1, 2, 5, 1, 3]]
-        ## means that the current process must receive from process 1
-        ## an array that will be saved at positions of indices 2:5
-        ## in the first dir and 1:3 in second dir
-        rFrom = np.empty((), dtype=np.int32)
-        if self._parentRank == 0:
-            for rk in range(self._parentSize):
-                cond1 = globalConnectivity[:, 1] == rk
-                cond2 = globalConnectivity[:, 0] != rk
-                cond = cond1 & cond2
-                # We keep lines of globalConnectivity that
-                # correspond to sendings to rk
-                sol = np.compress(cond, globalConnectivity, axis=0)
-                # We remove first column from which all elements
-                # must be equal to rk
-                cond1 = np.ones((sol.shape[1]), dtype=np.bool)
-                cond1[1] = False
-                sol = np.compress(cond1, sol, axis=1)
-                # Send to rk the resulting matrix
-                # Each line of this matrix tells that
-                # sol[i, 0] process must send to rk
-                # elements indicated by indices in sol[i, 1:]
-                if(rk != self._parentRank):
-                    self._parentComm.ssend(sol, dest=rk)
-                else:
-                    rFrom = sol.copy()
+            return {}
 
+    def localTargetInd(self):
+        """Local indices (slices), on the target mesh, of the points
+        kept by the current process."""
+        if self._rank in self._recv_indices:
+            return self._recv_indices[self._rank]
         else:
-            rFrom = self._parentComm.recv(source=0)
-
-        # Final setup for recvFrom and sendTo : shift from
-        # global indices to local ones.
-        for row in range(rFrom.shape[0]):
-            rFrom[row, 1:] = topoTo.toIndexLocal(rFrom[row, 1:])
-        for row in range(sTo.shape[0]):
-            sTo[row, 1:] = topoFrom.toIndexLocal(sTo[row, 1:])
+            return {}
 
-        row = 0
-        for rk in rFrom[:, 0]:
-            self.recvFrom[rk] = []
-            for d in range(dom.dimension):
-                self.recvFrom[rk].append(slice(rFrom[row, 2 * d + 1],
-                                               rFrom[row, 2 * d + 2] + 1))
-            row += 1
-
-        row = 0
-        for rk in sTo[:, 0]:
-            self.sendTo[rk] = []
-            for d in range(dom.dimension):
-                self.sendTo[rk].append(slice(sTo[row, 2 * d + 1],
-                                             sTo[row, 2 * d + 2] + 1))
-            row += 1
-            
-    @debug
-    def setUp(self):
-        """
-
-        """
-
-    def __str__(self):
-        """ TopologyBridge info display """
-        s = '======== Bridge from topology ' + str(self.topoFrom.getId())
-        s += ' to topology ' + str(self.topoTo.getId()) + ' ========\n'
-        s += '[' + str(self._parentRank) + '] sendTo :'
-        s += str(self.sendTo) + '\n'
-        s += '[' + str(self._parentRank) + '] recvFrom :'
-        s += str(self.recvFrom) + '\n'
-        s += '[' + str(self._parentRank) + '] iTo :' + str(self.ito) + '\n'
-        s += '[' + str(self._parentRank) + '] iFrom :' + str(self.ifrom) + '\n'
-        s += '=================================\n'
-        return s
-
-
-class Bridge_intercomm(object):
-    """
-    This bridge is defined as the Bridge class.
-
-    First, an intercommunicator is created from the tasks identifiers (used
-    by user to split the parent communicator).
-
-    The mesh indices are exchanged by process across the intercommunicator.
-    Intersections of mesh are computed and indices slices are stored in the
-    transfers dictionnary.
-
-    """
-    @debug
-    def __init__(self, topo, parent_comm, id_from, id_to, proc_tasks):
+    def recvTypes(self):
         """
-        Create a bridge between communicators of a single topology.
-        @param topo : Topology to consider.
-        @param parent_comm : MPI communicator that containts all process
-        involved in this bridge.
-        @param id_from : Identifier for the input task.
-        @param id_to : Identifier for the output task.
-        @param proc_task : Tasks assignment for process.
+        Return the dictionary of MPI derived types
+        used to receive data on the target topology.
+        @return : a dict of MPI types, one per sending process
         """
-        self.topo = topo
-        self.id_from = id_from
-        self.id_to = id_to
-        self.parent_comm = parent_comm
-        self.proc_tasks = proc_tasks
-        parent_rank = self.parent_comm.Get_rank()
-        my_comm = self.topo.comm
-        my_rank = my_comm.Get_rank()
-        dim = self.topo.domain.dimension
-
-        # 0 - Create an intercommunicator
-        # Create_intercomm attributes are:
-        #   - local rank of leader process for current group (always 0)
-        #   - parent communicator
-        #   - rank of leader process in the remote group
-        if self.proc_tasks[parent_rank] == self.id_from:
-            self.inter_comm = my_comm.Create_intercomm(
-                0,
-                self.parent_comm, self.proc_tasks.index(self.id_to))
-        if self.proc_tasks[parent_rank] == self.id_to:
-            self.inter_comm = my_comm.Create_intercomm(
-                0,
-                self.parent_comm, self.proc_tasks.index(self.id_from))
-
-        # 1 - Get global mesh indices of my domain
-        my_start = self.topo.mesh.global_start
-        my_end = self.topo.mesh.global_end
-        my_slices = npw.zeros((dim * 2, ), dtype=np.int32)
-        my_slices[0::2] = my_start
-        my_slices[1::2] = my_end
-
-        # 2 - Get global mesh indices of other domain
-        if self.proc_tasks[parent_rank] == self.id_from:
-            from_size = my_comm.Get_size()
-            to_size = self.inter_comm.Get_remote_size()
-            other_size = to_size
-        if self.proc_tasks[parent_rank] == self.id_to:
-            to_size = my_comm.Get_size()
-            from_size = self.inter_comm.Get_remote_size()
-            other_size = from_size
-
-        # 3 - Get global mesh indices of other topology process
-        # Each process broadcast his mesh indices to all process of other group
-        # from_slices and to_slices are temporary arrays
-        from_slices = npw.zeros((from_size, dim * 2), dtype=np.int32)
-        to_slices = npw.zeros((to_size, dim * 2), dtype=np.int32)
-
-        # 3.1 - each process of the 'from' group broadbast to the 'to' group
-        for rk in xrange(from_size):
-            if self.proc_tasks[parent_rank] == self.id_from:
-                if my_rank == rk:
-                    from_slices[rk, :] = my_slices
-                    self.inter_comm.bcast(from_slices[rk, :],
-                                          root=MPI.ROOT)
-                else:
-                    self.inter_comm.bcast(from_slices[rk, :],
-                                          root=MPI.PROC_NULL)
-            elif self.proc_tasks[parent_rank] == self.id_to:
-                from_slices[rk, :] = self.inter_comm.bcast(from_slices[rk, :],
-                                                           root=rk)
-        # 3.2 - each process of the 'to' group broadbast to the 'from' group
-        for rk in xrange(to_size):
-            if self.proc_tasks[parent_rank] == self.id_to:
-                if my_rank == rk:
-                    to_slices[rk, :] = my_slices
-                    self.inter_comm.bcast(to_slices[rk, :],
-                                          root=MPI.ROOT)
-                else:
-                    self.inter_comm.bcast(to_slices[rk, :],
-                                          root=MPI.PROC_NULL)
-            elif self.proc_tasks[parent_rank] == self.id_from:
-                to_slices[rk, :] = self.inter_comm.bcast(to_slices[rk, :],
-                                                         root=rk)
-        self.parent_comm.Barrier()
+        if self._recv_types is None:
+            data_shape = self._target.mesh.resolution
+            self._recv_types = topotools.createSubArray(self._recv_indices,
+                                                        data_shape)
+        return self._recv_types
 
-        if self.proc_tasks[parent_rank] == self.id_from:
-            other_slices = to_slices
-        elif self.proc_tasks[parent_rank] == self.id_to:
-            other_slices = from_slices
-
-        # 4. - Build connectivity
-        self.transfers = {}
-        for other_rk in xrange(other_size):
-            intersect_slice = npw.zeros((dim * 2), dtype=np.int32)
-            intersect_slice[0::2] = np.maximum(my_slices[0::2],
-                                               other_slices[other_rk, 0::2])
-            intersect_slice[1::2] = np.minimum(my_slices[1::2],
-                                               other_slices[other_rk, 1::2])
-            # Convert intersection to local slices, exluding ghosts
-            if not (intersect_slice[0::2] > intersect_slice[1::2]).any():
-                # compatible intersection (else nothing to transfer)
-                self.transfers[other_rk] = [
-                    (g + i - s, g + j + 1 - s) for i, j, s, g in zip(
-                        intersect_slice[0::2],
-                        intersect_slice[1::2], my_start, self.topo.ghosts)]
-
-    @debug
-    def setUp(self):
+    def sendTypes(self):
         """
-
+        Return the dictionary of MPI derived types
+        sent from the source topology.
+        @return : a dict of MPI types, keyed by remote process rank
         """
-
-# ## Example of broadcast:
-# ## Process 0 of group id_from broadcasting value to group id_to
-# if proc_tasks[parent_rank] == id_from:
-#     if my_rank == 0:
-#         value = 123
-#         inter_comm.bcast(value, root=MPI.ROOT)
-#     else:
-#         value = None
-#         inter_comm.bcast(value, root=MPI.PROC_NULL)
-#     value = comm_s.bcast(value, root=0) # for broad cast within the group
-# elif proc_tasks[parent_rank] == id_to:
-#     value = None
-#     value = inter_comm.bcast(value, root=0)
-# assert value == 123
-
-# ## Example of Point-to-Point send/receve:
-# x = npw.zeros((10, ))
-# if my_rank == 0 and proc_tasks[parent_rank] == id_from:
-#     x = npw.realarray(np.arange(10))
-# if my_rank == 1 and proc_tasks[parent_rank] == id_to:
-#     inter_comm.Recv([x, 10, PARMES_MPI_REAL], source=0, tag=0)
-# if my_rank == 0 and proc_tasks[parent_rank] == id_from:
-#     inter_comm.Send([x, 10, PARMES_MPI_REAL], dest=1, tag=0)
-# if my_rank == 1 and proc_tasks[parent_rank] == id_to:
-#     assert (x == npw.realarray(np.arange(10))).all()
+        if self._send_types is None:
+            data_shape = self._source.mesh.resolution
+            self._send_types = topotools.createSubArray(self._send_indices,
+                                                        data_shape)
+        return self._send_types
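+
+
+# Illustrative sketch (an assumption, not the library's actual call
+# sequence) : once the bridge is built, the derived types could drive a
+# point-to-point exchange. 'bridge', 'comm', 'field_from' and 'field_to'
+# are hypothetical names.
+# requests = []
+# for rk in bridge.sendTypes():
+#     requests.append(comm.Isend([field_from, bridge.sendTypes()[rk]],
+#                                dest=rk))
+# for rk in bridge.recvTypes():
+#     comm.Recv([field_to, bridge.recvTypes()[rk]], source=rk)
+# MPI.Request.Waitall(requests)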
diff --git a/HySoP/hysop/mpi/main_var.pyc b/HySoP/hysop/mpi/main_var.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..810d5761e655f4fc2ea7e3906adaa60cb07c0a9c
Binary files /dev/null and b/HySoP/hysop/mpi/main_var.pyc differ
diff --git a/HySoP/hysop/mpi/mesh.py b/HySoP/hysop/mpi/mesh.py
index a0e7c63d79d8beb45f0d9837909eb3bae18c3d3a..8768bc28a14440058eb572cd1073f35ef5e7d540 100644
--- a/HySoP/hysop/mpi/mesh.py
+++ b/HySoP/hysop/mpi/mesh.py
@@ -3,8 +3,10 @@
 Cartesian mesh class for local mesh.
 
 """
-from parmepy.constants import np, PARMES_INTEGER, debug
+from parmepy.constants import debug
 import parmepy.tools.numpywrappers as npw
+from parmepy.tools.parameters import Discretization
+import numpy as np
 
 
 class SubMesh(object):
@@ -18,7 +20,7 @@ class SubMesh(object):
         return object.__new__(cls, *args, **kw)
 
     @debug
-    def __init__(self, topo, g_start, resolution):
+    def __init__(self, domain, discretization, global_start, resolution):
         """
         Example of indices for a 8 point domain with 2 ghosts and 2 procs.
         \code
@@ -43,25 +45,25 @@ class SubMesh(object):
         local start/end : 2/5
         \endcode
         """
-        ## Topology that creates (and owns) this mesh
-        self._topology = topo
         ## Local resolution of this mesh, INCLUDING ghost points
-        self.resolution = np.asarray(resolution, dtype=PARMES_INTEGER)
+        self.resolution = npw.asdimarray(resolution)
         ## Dimension of the mesh
         self.dim = self.resolution.size
         ## index of the lowest "computed" point of this mesh
         ## (in each dir) in the global mesh
-        self.global_start = np.asarray(g_start, dtype=PARMES_INTEGER)
+        self.global_start = npw.asdimarray(global_start)
+        assert isinstance(discretization, Discretization)
+        self.discretization = discretization
+        ghosts = self.discretization.ghosts
         ## index of the upper point (in each dir), global mesh
-        self.global_end = self.global_start + self.resolution - 1\
-            - 2 * topo.ghosts
+        self.global_end = self.global_start + self.resolution - 1 - 2 * ghosts
         ## Mesh step size
-        self.space_step = npw.asarray(topo.domain.length /
-                                      (topo.globalMeshResolution - 1))
+        global_resolution = self.discretization.resolution
+        self.space_step = npw.asarray(domain.length / (global_resolution - 1))
         ## Mesh local indices, only for "computed" points
         ## (i.e. excluding ghosts)
-        self.local_start = topo.ghosts.copy()
-        self.local_end = self.resolution - topo.ghosts - 1
+        self.local_start = ghosts.copy()
+        self.local_end = self.resolution - ghosts - 1
         ## List of indices for computational points (i.e. without ghosts)
         ## usage : field[iCompute] returns the array field for all indices
         ## excluding ghost points.
@@ -70,14 +72,17 @@ class SubMesh(object):
         for d in range(self.dim):
             self.iCompute.append(slice(self.local_start[d],
                                        self.local_end[d] + 1))
+        self.allgrid = []
+        for d in range(self.dim):
+            self.allgrid.append(slice(0, self.resolution[d]))
 
         ## Coordinates of the "lowest" point of this mesh (including ghost)
         ## Warning FP : this strongly depends on where is the origin
         ## of the domain, of the type of boundary conditions
         ## and if origin is on ghosts or "real" points.
-        self.origin = topo.domain.origin.copy()
-        self.origin[:] += self.space_step[:] * \
-            (self.global_start[:] - topo.ghosts[:])
+        self.origin = domain.origin.copy()
+        self.origin[:] += self.space_step[:] * (self.global_start[:]
+                                                - ghosts[:])
 
         self.end = self.origin + self.space_step * (self.resolution - 1)
         if self.dim == 1:
@@ -85,17 +90,17 @@ class SubMesh(object):
             self.coords = tuple([cx, ])
         elif self.dim == 2:
             cx = np.linspace(self.origin[0], self.end[0],
-                             self.resolution[0])[:, np.newaxis]
+                             self.resolution[0])[:, np.newaxis]
             cy = np.linspace(self.origin[1], self.end[1],
-                             self.resolution[1])[np.newaxis, :]
+                             self.resolution[1])[np.newaxis, :]
             self.coords = tuple([cx, cy])
         elif self.dim == 3:
             cx = np.linspace(self.origin[0], self.end[0],
-                             self.resolution[0])[:, np.newaxis, np.newaxis]
+                             self.resolution[0])[:, np.newaxis, np.newaxis]
             cy = np.linspace(self.origin[1], self.end[1],
-                             self.resolution[1])[np.newaxis, :, np.newaxis]
+                             self.resolution[1])[np.newaxis, :, np.newaxis]
             cz = np.linspace(self.origin[2], self.end[2],
-                             self.resolution[2])[np.newaxis, np.newaxis, :]
+                             self.resolution[2])[np.newaxis, np.newaxis, :]
             self.coords = tuple([cx, cy, cz])
 
     def indices(self, tab):
@@ -114,11 +119,91 @@ class SubMesh(object):
         return tuple(ind)
 
     def __str__(self):
-        """ Sub mesh display """
-        s = 'Coords (topo):' + str(self._topology.proc_coords[:])
-        s += ' Sub mesh resolution:' + str(self.resolution) + '\n'
+        """
+        Sub mesh display
+        """
+        s = 'Ghost layer : ' + str(self.discretization.ghosts) + '\n'
+        s += 'Global mesh resolution : ' + str(self.discretization.resolution)
+        s += '\n'
+        s += 'Sub mesh resolution : ' + str(self.resolution) + '\n'
         s += 'Starting point global indices :' + str(self.global_start) + '\n'
         s += 'Local indices for "computed" points (excluding ghosts) :\n'
         s += '   start = ' + str(self.local_start)
         s += ', end = ' + str(self.local_end) + '\n'
         return s
+
+    def toIndexLocal(self, glob_index):
+        """
+        Return the local mesh indices from
+        their global indices.
+        Ilocal = Iglobal - _global_start + ghost
+        @param glob_index an array of size domain.dim*2
+        with glob_index = [start_dir1, end_dir1, start_dir2, end_dir2, ...]
+        @return an array of indices with the same layout as glob_index.
+        """
+        ilocal = npw.asarray(glob_index).copy()
+        ghosts = self.discretization.ghosts
+        ilocal[0::2] = ilocal[0::2] - self.global_start + ghosts
+        ilocal[1::2] = ilocal[1::2] - self.global_start + ghosts
+        return ilocal
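+
+    # A worked example of the formula above (illustrative numbers only) :
+    # with global_start = [4] and ghosts = [2], the global index pair
+    # [4, 7] maps to local indices [4 - 4 + 2, 7 - 4 + 2] = [2, 5].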
+
+    def toIndexLocalFull(self, sl):
+        """
+        Return the local mesh indices from
+        their global indices.
+        Ilocal = Iglobal - _global_start + ghost
+        @param sl : the list of slices (one per dir) for global indices
+        @return the same kind of list, in local coordinates
+        (ghost points included).
+        """
+        shift = self.local_start - self.global_start
+        imax = self.local_end + self.discretization.ghosts
+        dimension = self.discretization.ghosts.size
+        return [slice(max(sl[i].start + shift[i], 0),
+                      min(sl[i].stop + shift[i], imax[i]))
+                for i in xrange(dimension)]
+
+    def toIndexLocal2(self, sl):
+        """
+        Return the local mesh indices from
+        their global indices.
+        Ilocal = Iglobal - _global_start + ghost
+        @param sl: the list of slices (one per dir) for global indices
+        something like [slice(start_dir1, end_dir1), ...]
+        @return the same kind of list but in local coordinates
+        """
+        shift = self.local_start - self.global_start
+        imax = self.local_end + 1
+        imin = self.local_start
+        dimension = self.discretization.ghosts.size
+        return [slice(max(sl[i].start + shift[i], imin[i]),
+                      min(sl[i].stop + shift[i], imax[i]))
+                for i in xrange(dimension)]
+
+    def toIndexGlobal(self, sl):
+        """
+        Return the global mesh indices from
+        their local indices.
+        Iglobal = Ilocal + _global_start - ghost
+        @param sl : the list of slices (one per dir) for local indices
+        @return the same kind of list, in global coordinates.
+        """
+        shift = self.global_start - self.local_start
+        dimension = self.discretization.ghosts.size
+        return [slice(sl[i].start + shift[i], sl[i].stop + shift[i])
+                for i in xrange(dimension)]
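+
+    # Illustrative round-trip (assumed values) : with local_start = ghosts
+    # = [2] and global_start = [4], shift = 4 - 2 = 2 ; then
+    # toIndexGlobal([slice(2, 6)]) gives [slice(4, 8)], and toIndexLocal2
+    # maps [slice(4, 8)] back to [slice(2, 6)].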
+
+    def __eq__(self, other):
+        """
+        Equality operator for submeshes.
+        We consider that 2 submeshes are equal if all the conditions
+        below are fulfilled:
+        - their global resolution is the same
+        - the ghost layer is the same
+        - their local resolution is the same
+        """
+        if self.__class__ != other.__class__:
+            return False
+        return self.discretization == other.discretization and\
+            npw.equal(self.resolution, other.resolution).all()
diff --git a/HySoP/hysop/mpi/mesh.pyc b/HySoP/hysop/mpi/mesh.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..653ce982d8b950d55ad6d2d389d9423f0059e287
Binary files /dev/null and b/HySoP/hysop/mpi/mesh.pyc differ
diff --git a/HySoP/hysop/mpi/newBridge.py b/HySoP/hysop/mpi/newBridge.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac7d911c775df6a6d59f90881bd671a7d3e1890b
--- /dev/null
+++ b/HySoP/hysop/mpi/newBridge.py
@@ -0,0 +1,204 @@
+"""
+@file newBridge.py
+Tools to compute the intersection between
+two topologies defined on different MPI tasks.
+"""
+from parmepy.constants import np, debug
+from parmepy.mpi.main_var import MPI
+import parmepy.tools.numpywrappers as npw
+from parmepy.mpi.topology import topotools
+from parmepy.constants import PARMES_MPI_REAL, ORDERMPI
+
+
+# ## Example of broadcast:
+# ## Process 0 of group id_from broadcasting value to group id_to
+# if proc_tasks[parent_rank] == id_from:
+#     if my_rank == 0:
+#         value = 123
+#         inter_comm.bcast(value, root=MPI.ROOT)
+#     else:
+#         value = None
+#         inter_comm.bcast(value, root=MPI.PROC_NULL)
+#     value = comm_s.bcast(value, root=0)  # for broadcast within the group
+# elif proc_tasks[parent_rank] == id_to:
+#     value = None
+#     value = inter_comm.bcast(value, root=0)
+# assert value == 123
+
+# ## Example of Point-to-Point send/receive:
+# x = npw.zeros((10, ))
+# if my_rank == 0 and proc_tasks[parent_rank] == id_from:
+#     x = npw.realarray(np.arange(10))
+# if my_rank == 1 and proc_tasks[parent_rank] == id_to:
+#     inter_comm.Recv([x, 10, PARMES_MPI_REAL], source=0, tag=0)
+# if my_rank == 0 and proc_tasks[parent_rank] == id_from:
+#     inter_comm.Send([x, 10, PARMES_MPI_REAL], dest=1, tag=0)
+# if my_rank == 1 and proc_tasks[parent_rank] == id_to:
+#     assert (x == npw.realarray(np.arange(10))).all()
+
+class InterBridge(object):
+
+    def __init__(self, topoFrom, topoTo, idFrom, idTo, proc_tasks,
+                 parent_comm=None):
+        """
+        Create a bridge between two topologies owned by different MPI tasks.
+        @param topoFrom : source topology (may be None on the 'to' task).
+        @param topoTo : target topology (may be None on the 'from' task).
+        @param idFrom : identifier for the input (source) task.
+        @param idTo : identifier for the output (target) task.
+        @param proc_tasks : tasks assignment, one entry per process of
+        the parent communicator.
+        @param parent_comm : MPI communicator that contains all processes
+        involved in this bridge (default : parent communicator of the
+        local topology).
+        """
+
+        ## Connectivity table : what must be sent (and to which process)
+        ## from current process.
+        self.transfers = {}
+        # Do nothing unless both tasks are present in the proc_tasks list.
+        if idTo not in proc_tasks or idFrom not in proc_tasks:
+            return
+        ## Source topology
+        self.topoFrom = topoFrom
+        ## Targeted topology
+        self.topoTo = topoTo
+        # Id of the source group
+        self._id_from = idFrom
+        # Id of the targeted group
+        self._id_to = idTo
+        # Warning : depending on the current mpi task, one of the
+        # two topologies may be None.
+
+        self.parent_comm = parent_comm
+        assert topoFrom is not topoTo
+        msg = 'Local topology and required task id are not consistent.'
+        if self.topoFrom is not None:
+
+            self.localTopo = self.topoFrom
+            assert self._id_from == self.topoFrom.task_id, msg
+            localId = self._id_from
+            remoteId = self._id_to
+
+        if self.topoTo is not None:
+            self.localTopo = self.topoTo
+            assert self._id_to == self.topoTo.task_id, msg
+            localId = self._id_to
+            remoteId = self._id_from
+
+        if self.parent_comm is None:
+            self.parent_comm = self.localTopo.parent()
+
+        self.proc_tasks = proc_tasks
+
+        parent_rank = self.parent_comm.Get_rank()
+
+        # True if current process is in the 'from' group
+        taskFrom = self.proc_tasks[parent_rank] == self._id_from
+
+        # True if current process is in the 'to' group
+        taskTo = self.proc_tasks[parent_rank] == self._id_to
+
+        # Ensure that current process belongs to one and only one task.
+        msg = 'The current process belongs to neither task of this bridge.'
+        assert taskTo or taskFrom, msg
+        msg = 'You can not build an inter-bridge '
+        msg += 'between topologies of the same MPI task.'
+        assert not(taskTo and taskFrom), msg
+
+        comm = self.localTopo.comm
+        rank = comm.Get_rank()
+        dimension = self.localTopo.domain.dimension
+
+        # ==== 0 - Create an intercommunicator ====
+        # Create_intercomm attributes are:
+        #   - local rank of leader process for current group (always 0)
+        #   - parent communicator
+        #   - rank of leader process in the remote group
+        self.inter_comm = comm.Create_intercomm(
+            0, self.parent_comm, self.proc_tasks.index(remoteId))
+
+        # 1 - Get global mesh indices of my domain
+        my_start = self.localTopo.mesh.global_start
+        my_end = self.localTopo.mesh.global_end
+        my_slices = npw.zeros((dimension * 2), dtype=np.int32)
+        my_slices[0::2] = my_start
+        my_slices[1::2] = my_end
+
+        # 2 - Get communicators sizes
+        localSize = comm.Get_size()
+        remoteSize = self.inter_comm.Get_remote_size()
+
+        # 3.1 - each process of the 'from' group broadcasts its slice
+        # to the 'to' group.
+        r1 = self._sendSlices(my_slices, rank, localSize,
+                              remoteSize, taskFrom, taskTo)
+        # 3.2 - each process of the 'to' group broadcasts its slice
+        # to the 'from' group.
+        r2 = self._sendSlices(my_slices, rank, localSize,
+                              remoteSize, taskTo, taskFrom)
+
+        self.parent_comm.Barrier()
+
+        if taskFrom:
+            other_slices = r2
+        if taskTo:
+            other_slices = r1
+
+        # 4. - Build connectivity
+
+        for other_rk in xrange(remoteSize):
+            start = np.maximum(my_slices[0::2], other_slices[other_rk, 0::2])
+            end = np.minimum(my_slices[1::2], other_slices[other_rk, 1::2]) + 1
+            if (start >= end).any():
+                # Empty intersection : nothing to exchange with this process.
+                continue
+            # Rebuild the slice list for each remote process.
+            intersect_slice = [slice(start[pos], end[pos])
+                               for pos in range(dimension)]
+            self.transfers[other_rk] = \
+                self.localTopo.toIndexLocal2(intersect_slice)
+
+    def _sendSlices(self, localSlice, rank, nbProcs1, nbProcs2, task1, task2):
+        """
+        @param localSlice : slice to be sent
+        @param rank : mpi process rank in task1 communicator.
+        @param nbProcs1 : size of task1 communicator
+        @param nbProcs2 : size of task2 communicator
+        @param task1 : true if current process belongs to task1
+        @param task2 : true if current process belongs to task2
+
+        Purpose : Task1 group sends local slice to task2,
+        through mpi inter-communicator.
+        This routine MUST be called by all procs of parent communicator.
+
+        """
+        length = localSlice.size
+        sendBuff = npw.zeros((nbProcs1, length), dtype=np.int32)
+        recvBuff = npw.zeros((nbProcs2, length), dtype=np.int32)
+
+        if task1:
+            for rk in xrange(nbProcs1):
+                if rank == rk:
+                    sendBuff[rk, :] = localSlice
+                    self.inter_comm.bcast(sendBuff[rk, :], root=MPI.ROOT)
+                else:
+                    self.inter_comm.bcast(sendBuff[rk, :], root=MPI.PROC_NULL)
+        elif task2:
+            for rk in xrange(nbProcs2):
+                recvBuff[rk, :] = self.inter_comm.bcast(recvBuff[rk, :],
+                                                        root=rk)
+
+        return recvBuff
+
+    def _createSubArray(self, slicesList, subtypes, dataShape):
+        """
+        Build and commit one MPI subarray derived type per entry
+        of slicesList (one for each remote process rank).
+        @param slicesList : dict of local slices, keyed by remote rank.
+        @param subtypes : dict to be filled with the committed MPI types.
+        @param dataShape : shape (numpy-like) of the original array.
+        """
+        assert isinstance(subtypes, dict)
+        dim = len(dataShape)
+        for rk in slicesList.keys():
+            subvshape = tuple([slicesList[rk][i].stop -
+                               slicesList[rk][i].start for i in range(dim)])
+            substart = tuple([slicesList[rk][i].start for i in range(dim)])
+            subtypes[rk] = \
+                PARMES_MPI_REAL.Create_subarray(dataShape,
+                                                subvshape,
+                                                substart,
+                                                order=ORDERMPI)
+            subtypes[rk].Commit()
+
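+# ## Illustrative sketch (assumed names, not the actual API) :
+# ## build an inter-bridge between a 'CPU' and a 'GPU' task and inspect
+# ## the resulting connectivity table. On a CPU process topoGPU is None
+# ## (and conversely), as explained in the constructor docstring.
+# bridge = InterBridge(topoCPU, topoGPU, idFrom=CPU, idTo=GPU,
+#                      proc_tasks=proc_tasks)
+# for rk in bridge.transfers:
+#     # bridge.transfers[rk] : local slices to exchange with remote rank rk.
+#     print(rk, bridge.transfers[rk])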
diff --git a/HySoP/hysop/mpi/tests/test_bridge.py b/HySoP/hysop/mpi/tests/test_bridge.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f3575a90396f59352c51f8bb38de07320e7e682
--- /dev/null
+++ b/HySoP/hysop/mpi/tests/test_bridge.py
@@ -0,0 +1,33 @@
+"""
+@file parmepy.mpi.tests.test_bridge
+Testing bridges between topologies.
+"""
+from parmepy.domain.box import Box
+from parmepy.mpi.topology import Cartesian
+from parmepy.constants import PARMES_INDEX
+
+
+def test_bridge2D():
+    import math
+    Lx = Ly = 2 * math.pi
+    from parmepy.mpi.bridge import Bridge
+    dom = Box(dimension=2, length=[Lx, Ly], origin=[0., 0.])
+    resolution = [65, 65]
+    topo2 = Cartesian(dom, 2, resolution)
+    topo2.setUp()
+    topo1 = Cartesian(dom, 1, resolution)
+    topo1.setUp()
+    bridge = Bridge(topo1, topo2)
+    print (bridge)
+    # We cannot really check something interesting,
+    # so we just create a bridge.
+
+
+def test_bridge3D():
+    import numpy as np
+    from parmepy.mpi.bridge import Bridge
+    dom = Box()
+    resolution3d = np.asarray((65, 65, 65), dtype=PARMES_INDEX)
+    topo1 = Cartesian(dom, 3, resolution3d)
+    topo1.setUp()
+    topo2 = Cartesian(dom, 1, resolution3d)
+    topo2.setUp()
+    bridge = Bridge(topo1, topo2)
+    print (bridge)
+    # We cannot really check something interesting,
+    # so we just create a bridge.
+
+
diff --git a/HySoP/hysop/mpi/tests/test_mesh.py b/HySoP/hysop/mpi/tests/test_mesh.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f3c7570d0d319a22328d38a3a9e6db0b0655217
--- /dev/null
+++ b/HySoP/hysop/mpi/tests/test_mesh.py
@@ -0,0 +1,61 @@
+"""
+@file parmepy.mpi.tests.test_mesh
+Testing mesh.
+"""
+import numpy as np
+from parmepy.domain.box import Box
+from parmepy.mpi.topology import Cartesian
+from parmepy.tools.parameters import Discretization
+
+
+def test_mesh3D():
+    """Periodic mesh"""
+    dom = Box()
+    resolTopo = Discretization([33, 33, 17])
+    topo = Cartesian(dom, resolTopo, dim=3)
+    res = resolTopo.resolution
+    dx = [1. / (n - 1) for n in res]
+    assert (topo.mesh.origin == [0. for n in res]).all()
+    assert (topo.mesh.end == [1. - dxx for dxx in dx]).all()
+    assert (topo.mesh.global_start == [0 for n in res]).all()
+    assert (topo.mesh.global_end == [n - 2 for n in res]).all()
+    assert (topo.mesh.local_start == [0 for n in res]).all()
+    assert (topo.mesh.local_end == [n - 2 for n in res]).all()
+    assert len(topo.mesh.coords) == 3
+    assert topo.mesh.coords[0].shape == (res[0] - 1, 1, 1)
+    assert topo.mesh.coords[1].shape == (1, res[1] - 1, 1)
+    assert topo.mesh.coords[2].shape == (1, 1, res[2] - 1)
+    assert topo.mesh.coords[0][1, 0, 0] == dx[0]
+    assert topo.mesh.coords[1][0, 1, 0] == dx[1]
+    assert topo.mesh.coords[2][0, 0, 1] == dx[2]
+
+
+def test_mesh3D_ghost():
+    """Periodic mesh"""
+    dom = Box()
+    resolTopo = Discretization([33, 33, 17], [2, 2, 2])
+    topo = Cartesian(dom, resolTopo, dim=3)
+    ghost = topo.ghosts()
+    res = resolTopo.resolution
+    dx = [1. / (n - 1) for n in res]
+    assert (topo.mesh.origin ==
+            [0. - g * dxx for dxx, g in zip(dx, ghost)]).all()
+    assert (topo.mesh.end ==
+            [1. - dxx + g * dxx for dxx, g in zip(dx, ghost)]).all()
+    assert (topo.mesh.global_start == [0 for n in res]).all()
+    assert (topo.mesh.global_end == [n - 2 for n in res]).all()
+    assert (topo.mesh.local_start == [g for g in ghost]).all()
+    assert (topo.mesh.local_end ==
+            [n - 2 + g for n, g in zip(res, ghost)]).all()
+    assert len(topo.mesh.coords) == 3
+    assert topo.mesh.coords[0].shape == (res[0] - 1 + 2 * ghost[0], 1, 1)
+    assert topo.mesh.coords[1].shape == (1, res[1] - 1 + 2 * ghost[1], 1)
+    assert topo.mesh.coords[2].shape == (1, 1, res[2] - 1 + 2 * ghost[2])
+    assert topo.mesh.coords[0][1, 0, 0] == (-ghost[0] + 1) * dx[0]
+    assert topo.mesh.coords[1][0, 1, 0] == (-ghost[1] + 1) * dx[1]
+    assert topo.mesh.coords[2][0, 0, 1] == (-ghost[2] + 1) * dx[2]
+
+# Todo : update tests for multi-proc mpi runs.
+#if __name__ == '__main__':
+#    test_mesh3D()
+#    test_mesh3D_ghost()
diff --git a/HySoP/hysop/mpi/tests/test_topology.py b/HySoP/hysop/mpi/tests/test_topology.py
index 2eca025d86adc69244f449b2a6c19485880eb73b..3961be0e9a1521c19357f2972f61ccfbbaa8404e 100644
--- a/HySoP/hysop/mpi/tests/test_topology.py
+++ b/HySoP/hysop/mpi/tests/test_topology.py
@@ -1,116 +1,267 @@
 import parmepy as pp
 from parmepy.domain.box import Box
-from parmepy.mpi.topology import Cartesian
-from parmepy.constants import PARMES_INDEX
+from parmepy.constants import DEFAULT_TASK_ID
+from parmepy.tools.parameters import Discretization
+from parmepy.mpi import main_size
+import numpy as np
+import parmepy.tools.numpywrappers as npw
 
 
-def test_create_topology1D():
-    dom = Box()
-    resolTopo = [33, 33, 17]
-    topo = Cartesian(dom, 1, resolTopo)
-    topo.setUp()
+N = 33
+r1D = Discretization([N])
+r2D = Discretization([N, 17])  # No ghosts
+r3D = Discretization([N, N, 17])  # No ghosts
+r3DGh = Discretization([N, N, 17], [2, 2, 2])  # Ghosts
+
+CPU = DEFAULT_TASK_ID
+GPU = 29
+proc_tasks = [CPU] * main_size
+if main_size > 2:
+    proc_tasks[-1] = GPU
+    proc_tasks[0] = GPU
+
+dom3D = Box(proc_tasks=proc_tasks)
+dom2D = Box(dimension=2, proc_tasks=proc_tasks)
+dom3D_notask = Box()
+
+# A mesh of reference for comparison.
+# Obviously we assume that default topo constructor works well...
+toporef = dom3D.create_topology(r3DGh, dim=1)
+refmesh = toporef.mesh
+toporef_notask = dom3D_notask.create_topology(r3DGh, dim=1)
+toporef2d = dom2D.create_topology(r2D, dim=1)
+refmesh2d = toporef2d.mesh
+
+
+def check2D(topo):
+    assert topo.size == main_size
+    assert topo.task_id() == DEFAULT_TASK_ID
+    assert np.allclose(topo.mesh.discretization.resolution,
+                       r2D.resolution)
+
+
+def check3D(topo):
+    assert topo.size == main_size
+    assert topo.task_id() == DEFAULT_TASK_ID
+    assert np.allclose(topo.mesh.discretization.resolution,
+                       r3D.resolution)
+
 
+# ===== 2D domains ====
+# Default:
+def test_create_default_topology_2d():
+    dom = Box(dimension=2)
+    topo = dom.create_topology(r2D)
+    assert topo.domain is dom
+    check2D(topo)
+
+
+# Test taskid
+def test_create_default_topology2_2d():
+    dom = Box(dimension=2, proc_tasks=proc_tasks)
+    topo = dom.create_topology(r2D)
     assert topo.domain == dom
-    assert topo.size == pp.mpi.main_size
+    assert topo.size == dom2D.comm_task.Get_size()
+    if dom.isOnTask(CPU):
+        assert topo.task_id() == CPU
+    if dom.isOnTask(GPU):
+        assert topo.task_id() == GPU
 
 
-def test_create_topology2D():
-    dom = Box()
-    resolTopo = [33, 33, 17]
-    topo = Cartesian(dom, 2, resolTopo)
-    topo.setUp()
+# Input : dimension
+def test_create_topologyFromDim_2d():
+    dom = Box(dimension=2)
+    topo1 = dom.create_topology(r2D, dim=1)
+    check2D(topo1)
+    topo2 = dom.create_topology(r2D, dim=2)
+    check2D(topo2)
+
+
+# Input : shape
+def test_create_topologyFromShape_2d():
+    dom = Box(dimension=2)
+    if main_size == 8:
+        topoShape = npw.asdimarray([2, 4])
+        topo = dom.create_topology(r2D, shape=topoShape)
+        assert topo.domain == dom
+        assert topo.dimension == 2
+        assert topo.size == pp.mpi.main_size
+        assert (topo.shape == topoShape).all()
+        assert (topo.mesh.resolution == [16, 4]).all()
+
+    else:
+        shape = [main_size, 1]
+        topo = dom.create_topology(r2D, shape=shape)
+        assert (topo.shape == shape).all()
+        assert topo.dimension == 1
+        check2D(topo)
+
 
+# Input = cutdir
+def test_create_topologyFromCutdir_2d():
+    dom = Box(dimension=2)
+    if main_size >= 4:
+        topo = dom.create_topology(r2D, cutdir=[False, True])
+        assert topo.domain == dom
+        assert topo.dimension == 1
+        assert topo.size == pp.mpi.main_size
+        assert (topo.shape == [1, main_size]).all()
+
+    topo2 = dom.create_topology(r2D, cutdir=[True, False])
+    assert (topo2.shape == [main_size, 1]).all()
+    assert topo2.dimension == 1
+    check2D(topo2)
+
+
+# plane topo with input mesh
+def test_create_planetopology_2d():
+    dom = Box(dimension=2)
+    offs = refmesh2d.start()
+    lres = refmesh2d.resolution
+    topo = dom.create_plane_topology_from_mesh(global_start=offs,
+                                               localres=lres,
+                                               discretization=r2D,
+                                               )
     assert topo.domain == dom
+    assert topo.dimension == 1
     assert topo.size == pp.mpi.main_size
+    assert (topo.shape == [1, main_size]).all()
+    assert topo.mesh == refmesh2d
+    topo2 = dom.create_plane_topology_from_mesh(discretization=r2D,
+                                                global_start=offs,
+                                                localres=lres, cdir=0)
+    assert topo2.domain == dom
+    assert topo2.dimension == 1
+    assert topo2.size == pp.mpi.main_size
+    assert (topo2.shape == [main_size, 1]).all()
 
 
-def test_create_topology3D():
+# ===== 3D domains ====
+# Default:
+def test_create_default_topology():
     dom = Box()
-    resolTopo = [33, 33, 17]
-    topo = Cartesian(dom, 3, resolTopo)
-    topo.setUp()
+    topo = dom.create_topology(r3D)
+    assert topo.domain is dom
+    check3D(topo)
 
+
+# Test taskid
+def test_create_default_topology2():
+    dom = Box(proc_tasks=proc_tasks)
+    topo = dom.create_topology(r3D)
     assert topo.domain == dom
-    assert topo.size == pp.mpi.main_size
+    assert topo.size == dom3D.comm_task.Get_size()
+    if dom.isOnTask(CPU):
+        assert topo.task_id() == CPU
+    if dom.isOnTask(GPU):
+        assert topo.task_id() == GPU
 
 
-def test_create_topology_with_dims():
+# Input : dimension
+def test_create_topologyFromDim():
     dom = Box()
-    resol = [33, 33, 17]
-    import numpy as np
-    topoDims = np.asarray([pp.mpi.main_size, 1, 1])
-    topo = Cartesian.withResolution(dom, shape=topoDims,
-                                    globalMeshResolution=resol)
-    topo.setUp()
-    assert topo.domain == dom
-    assert topo.dim == 1
-    assert topo.size == pp.mpi.main_size
-    testList = topo.shape[topo.shape != 1] == topoDims[topoDims != 1]
-    assert testList.all()
+    topo1 = dom.create_topology(r3D, dim=1)
+    check3D(topo1)
+    topo2 = dom.create_topology(r3D, dim=2)
+    check3D(topo2)
+    topo3 = dom.create_topology(r3D, dim=3)
+    check3D(topo3)
 
 
-def test_operator_equal():
+# Input : shape
+def test_create_topologyFromShape():
     dom = Box()
-    resolTopo = [33, 33, 17]
-    topoDims = [pp.mpi.main_size, 1, 1]
-    topo = Cartesian.withResolution(dom, topoDims, resolTopo)
-    topo2 = Cartesian(Box(), 2, resolTopo)
-    assert not topo == topo2
-    topo2 = Cartesian(dom, 2, [11, 33, 17])
-    assert not topo == topo2
+    if main_size == 8:
+        topoShape = npw.asdimarray([2, 2, 2])
+        topo = dom.create_topology(r3D, shape=topoShape)
+        assert topo.domain == dom
+        assert topo.dimension == 3
+        assert topo.size == pp.mpi.main_size
+        assert (topo.shape == topoShape).all()
+        assert (topo.mesh.resolution == [16, 16, 8]).all()
 
-    topo2 = Cartesian(dom, 2, shape=topoDims[-1::-1],
-                      globalMeshResolution=resolTopo)
-    assert topo == topo2
+    else:
+        shape = [main_size, 1, 1]
+        topo = dom.create_topology(r3D, shape=shape)
+        assert (topo.shape == shape).all()
+        assert topo.dimension == 1
+        check3D(topo)
 
 
-def test_operator_notequal():
+# Input = cutdir
+def test_create_topologyFromCutdir():
     dom = Box()
-    resol1 = [33, 33, 17]
-    resol2 = [33, 33, 12]
-    topo = Cartesian(dom, 3, resol1)
-    topo2 = Cartesian(dom, 3, resol2)
-    assert topo != topo2
-
-
-def test_bridge2D():
-    import math
-    Lx = Ly = 2 * math.pi
-    from parmepy.mpi.bridge import Bridge
-    dom = Box(dimension=2, length=[Lx, Ly], origin=[0., 0.])
-    resolution = [65, 65]
-    topo2 = Cartesian(dom, 2, resolution)
-    topo2.setUp()
-    topo1 = Cartesian(dom, 1, resolution)
-    topo1.setUp()
-    bridge = Bridge(topo1, topo2)
-    print (bridge)
-    # We cannot really check something interesting,
-    # so we just create a bridge.
-
-
-def test_bridge3D():
-    import numpy as np
-    from parmepy.mpi.bridge import Bridge
+    if main_size == 8:
+        topo = dom.create_topology(r3D, cutdir=[False, True, True])
+        assert topo.domain == dom
+        assert topo.dimension == 2
+        assert topo.size == pp.mpi.main_size
+        assert (topo.shape == [1, 2, 4]).all()
+
+    topo2 = dom.create_topology(r3D, cutdir=[False, True, False])
+    assert (topo2.shape == [1, main_size, 1]).all()
+    assert topo2.dimension == 1
+    check3D(topo2)
+
+
+# plane topo with input mesh
+def test_create_planetopology():
+    dom = Box()
+    offs = refmesh.start()
+    lres = refmesh.resolution
+    topo = dom.create_plane_topology_from_mesh(discretization=r3DGh,
+                                               global_start=offs,
+                                               localres=lres)
+    assert topo.domain == dom
+    assert topo.dimension == 1
+    assert topo.size == pp.mpi.main_size
+    assert (topo.shape == [1, 1, main_size]).all()
+    assert topo.mesh == refmesh
+    topo2 = dom.create_plane_topology_from_mesh(discretization=r3DGh,
+                                                global_start=offs,
+                                                localres=lres, cdir=1)
+    assert topo2.domain == dom
+    assert topo2.dimension == 1
+    assert topo2.size == pp.mpi.main_size
+    assert (topo2.shape == [1, main_size, 1]).all()
+
+
+def test_operator_equal():
     dom = Box()
-    resolution3d = np.asarray((65, 65, 65), dtype=PARMES_INDEX)
-    topo1 = Cartesian(dom, 3, resolution3d)
-    topo1.setUp()
-    topo2 = Cartesian(dom, 1, resolution3d)
-    topo2.setUp()
-    bridge = Bridge(topo1, topo2)
-    print (bridge)
-    # We cannot really check something interesting,
-    # so we just create a bridge.
+    topoDims = [main_size, 1, 1]
+    topo = dom.create_topology(r3DGh, shape=topoDims)
+    mesh = toporef_notask.mesh
+    topo2 = Box().create_plane_topology_from_mesh(
+        discretization=r3DGh, global_start=mesh.start(),
+        localres=mesh.resolution, cdir=2)
+    # Same as topo2 except for the discretization.
+    topo3 = Box().create_plane_topology_from_mesh(
+        discretization=r3D, global_start=mesh.start(),
+        localres=mesh.resolution, cdir=2)
+    assert topo2.mesh == mesh
+    assert (topo2.shape == toporef_notask.shape).all()
+    assert topo2.domain == toporef_notask.domain
+    assert topo2 == toporef_notask
+    assert not topo2 == topo3
+    if main_size > 1:
+        assert not topo == topo2
+    else:
+        assert topo == topo2
 
+    # test not equal ...
+    assert topo2 != topo3
 
 if __name__ == "__main__":
-    test_create_topology1D()
-    test_create_topology2D()
-    test_create_topology3D()
-    test_create_topology_with_dims()
+    test_create_default_topology_2d()
+    test_create_default_topology2_2d()
+    test_create_topologyFromDim_2d()
+    test_create_topologyFromShape_2d()
+    test_create_topologyFromCutdir_2d()
+    test_create_planetopology_2d()
+    test_create_default_topology()
+    test_create_default_topology2()
+    test_create_topologyFromDim()
+    test_create_topologyFromShape()
+    test_create_topologyFromCutdir()
+    test_create_planetopology()
     test_operator_equal()
-    test_operator_notequal()
-    test_bridge2D()
-    test_bridge3D()
- 
diff --git a/HySoP/hysop/mpi/topoOld b/HySoP/hysop/mpi/topoOld
new file mode 100644
index 0000000000000000000000000000000000000000..769f4115a368a445b3342b60e4d410c6abfd369c
--- /dev/null
+++ b/HySoP/hysop/mpi/topoOld
@@ -0,0 +1,494 @@
+"""
+@file topology.py
+
+Parmes topologies definitions and related tools.
+A Parmes Topology is defined as the association of
+a mpi process distribution (mpi topology) and of a set of local meshes
+(one per process).
+
+For now, only cartesian topologies with cartesian meshes are available.
+
+This module provides the following classes :
+- Cartesian : 1,2 or 3D parmes topology.
+- Bridge : bridge between two parmes topologies : i.e. what must be
+exchanged between two topologies and how.
+
+To get more details try :
+\code
+>> import parmepy.mpi.topology as topo
+>> help(topo.Cartesian)
+>> help(topo.Bridge)
+\endcode
+
+"""
+
+from parmepy.constants import ORDER, PARMES_INDEX, debug, PARMES_INTEGER
+from parmepy.mpi.mesh import SubMesh
+from itertools import count
+from parmepy.mpi.main_var import MPI
+from parmepy.tools.parameters import Discretization
+import numpy as np
+
+
+class Cartesian(object):
+    """
+    Define a MPI cartesian topology (mpi processes layout) AND
+    a local cartesian mesh on each sub-domain.
+    """
+
+    @debug
+    def __new__(cls, *args, **kw):
+        return object.__new__(cls, *args, **kw)
+
+    # Counter of topology.Cartesian instances to set a unique id for each
+    # Cartesian topology instance.
+    __ids = count(0)
+
+    @debug
+    def __init__(self, domain, dim, discretization, mpi_params,
+                 periods=None, cutdir=None, shape=None,
+                 precomputed=False, localres=None, localoffset=None):
+        """
+        @param domain : the geometry; it must be a box.
+        @param dim : dimension of the topology
+        @param discretization : a parmepy.tools.parameters.Discretization
+        with:
+        - resolution = Number of points in the domain
+        in each direction. We assume that first point corresponds
+        to origin, and last point to boundary point,
+        whatever the boundary type is.
+        That is x[0] = domain.origin and
+        x[Discretization.resolution-1] = domain.Lengths_x.
+        - ghosts =  number of points in the ghost layer
+        @param mpi_params : a parmepy.tools.parameters.MPI_params, with:
+        - comm : MPI communicator used to create this topology
+         (default = main_comm)
+        - task_id : id of the task that owns this topology.
+        @param shape : topology resolution
+        (i.e process layout in each direction).
+        @param periods : periodicity of the topology (in each direction)
+        """
+        assert isinstance(discretization, Discretization)
+        globalMeshResolution = discretization.resolution
+        self.ghosts = discretization.ghosts
+
+        assert domain.dimension == globalMeshResolution.__len__(), \
+            'The resolution size differs from the domain dimension.'
+
+        ## Associated domain
+        self.domain = domain
+        # An id for the topology
+        self.__id = self.__ids.next()
+        ## Dim of the cartesian grid for MPI processes.
+        self.dim = dim
+        ## mpi parameters : Communicator used to build the topology
+        ## and task_id
+        self._mpis = mpi_params
+        assert mpi_params.task_id is not None
+        # number of process in parent comm
+        origin_size = self._mpis.comm.Get_size()
+        ## True if everything has been properly set for this topology
+        self.isUpToDate = False
+        # Mind the sequential case : dim computed by the classmethod
+        # constructors may be equal to 0 if shape[:] = 1
+        ## Grid of mpi process layout
+        self.shape = np.ones(self.domain.dimension, dtype=PARMES_INDEX)
+        # MPI processes layout.
+        if shape is None:
+            # If shape is not provided, compute an "optimal" process
+            # distribution for each direction of the grid topology.
+            shape = np.asarray(MPI.Compute_dims(origin_size, self.dim),
+                               dtype=PARMES_INDEX)
+
+            if cutdir is not None:
+                self.cutdir = np.array(cutdir, dtype=np.bool)
+                self.shape[self.cutdir] = shape
+            else:
+                # Reorder shape according to the data layout
+                # if arrays are in "fortran" order (column major)
+                # the last dir has priority for distribution.
+                # For C-like (row major) arrays, first dir is the
+                # first to be distributed, and so on.
+                self.shape[:self.dim] = shape
+                self.shape.sort()
+                if ORDER == 'C':
+                    self.shape[:] = self.shape[::-1]
+                self.cutdir = self.shape != 1
+            # redim if necessary ...
+            self.dim = self.shape[self.cutdir].size
+
+        else:
+            shape = np.asarray(shape)
+            assert shape.size == self.domain.dimension, 'Input shape must be \
+                of the same size as the domain dimension.'
+            if cutdir is None:
+                self.cutdir = shape != 1
+            else:
+                self.cutdir = cutdir
+
+            self.shape = shape
+            self.dim = self.shape[self.cutdir].size
+
+        # Special care for the 1 process case:
+        if origin_size == 1:
+            self.dim = 1
+            self.cutdir = shape != 1
+            self.cutdir[-1] = True
+
+        ## MPI process grid periodicity; default is true.
+        self.periods = np.ones((self.dim), dtype=np.bool)
+        if periods is not None:
+            assert periods.size == self.dim
+            self.periods[:] = periods[:]
+        self.comm = self._mpis.comm.Create_cart(self.shape[self.cutdir],
+                                                periods=self.periods,
+                                                reorder=True)
+
+        ## Size of the topology (i.e. total number of mpi processes)
+        self.size = self.comm.Get_size()
+        ## Rank of the current process in the topology
+        self.rank = self.comm.Get_rank()
+        ## Coordinates of the current process
+        self.reduced_coords = np.asarray(self.comm.Get_coords(self.rank),
+                                         dtype=PARMES_INDEX)
+        ## Coordinates of the current process
+        ## What is different between proc_coords and reduced_coords?
+        ## --> proc_coords has values even for directions that
+        ## are not distributed. If cutdir = [False, False, True]
+        ## then reduced_coords = [ nx ] and proc_coords = [0, 0, nx]
+        self.proc_coords = np.zeros(self.domain.dimension, dtype=PARMES_INDEX)
+        self.proc_coords[self.cutdir] = self.reduced_coords
+        ## Neighbours : self.neighbours[0,i] (resp. [1,i])
+        ## previous (resp. next) neighbour in direction i
+        ## (warning : direction in the grid of process).
+        self.neighbours = np.zeros((2, self.dim), dtype=PARMES_INTEGER)
+        for direction in range(self.dim):
+            self.neighbours[:, direction] = self.comm.Shift(direction, 1)
+
+        # --- Computation of the local mesh resolution and indices ---
+        ##  Resolution of the global mesh
+        self.globalMeshResolution = np.asarray(globalMeshResolution,
+                                               dtype=PARMES_INDEX)
+
+        # If local resolution has already been computed
+        # (for example from fftw init output)
+        if precomputed:
+            self.G_start = localoffset
+            self.localGridResolution = localres
+
+        # Usual case : we compute local resolution/offset
+        else:
+            # Number of "computed" points (i.e. excluding ghosts/boundaries).
+            pts_noghost = np.zeros((self.domain.dimension), dtype=PARMES_INDEX)
+            # Warning FP : we should test boundary conditions type here
+            # If periodic, resol_calc = (globalMeshResolution - 1)
+            # else, resol_calc =  (globalMeshResolution - 2)
+            resolCalc = np.zeros((domain.dimension), dtype=PARMES_INDEX)
+            resolCalc[:] = self.globalMeshResolution[:] - 1
+            pts_noghost[:] = resolCalc // self.shape
+
+            # If any, remaining points are
+            # added on the mesh of the last process.
+            remainingPts = np.zeros(self.domain.dimension, dtype=PARMES_INDEX)
+            remainingPts[:] = resolCalc % self.shape
+
+            # Then the total number of points (size of arrays to be allocated)
+            nbpoints = pts_noghost.copy()
+            for i in range(self.domain.dimension):
+                if self.proc_coords[i] == self.shape[i] - 1:
+                    nbpoints[i] += remainingPts[i]
+
+            self.localGridResolution = resolCalc.copy()
+            self.localGridResolution[:] = nbpoints[:]
+            self.localGridResolution[:] += 2 * self.ghosts[:]
+
+            ## Global indices for the local mesh points;
+            ## self.G_start[i] is the index of the first
+            ## point in the i direction (It includes ghosts/boundary points).
+            self.G_start = np.zeros((domain.dimension), dtype=PARMES_INDEX)
+            self.G_start[:] = self.proc_coords[:] * pts_noghost[:]
+
+        ## Local mesh on the current mpi process.
+        self.mesh = None
+
+        # The topology is register into domain list.
+        # If it already exists (in the sense of the comparison operator
+        # of the present class) then isNew is turned to false
+        # and the present instance will be destroyed.
+        self.isNew = True
+        self.__id = self.domain.register(self)
+
+    def parent(self):
+        """
+        returns the communicator used to build this topology
+        """
+        return self._mpis.comm
+
+    def task_id(self):
+        """
+        @return id of the task that owns this topology
+        """
+        return self._mpis.task_id
+
+    @debug
+    def setUp(self):
+        """Topology set up.
+
+        Create topology's Local Mesh.
+        This function is called during registration in the domain
+        (domain.register(topo)).
+        """
+        if not self.isUpToDate:
+            self.mesh = SubMesh(self, self.G_start, self.localGridResolution)
+        self.isUpToDate = True
+
+    @classmethod
+    def withResolution(cls, domain, shape, discretization,
+                       mpi_params, periods=None):
+        """
+        Compute a topology for a given shape of the process grid.
+        If the dimension of the topology is smaller than the domain
+        dimension, shape will be reordered according to the following rule
+        (depending on the array storage order, C or Fortran) :
+        - if C : first dir, first distributed.
+        - if Fortran : last dir, first distributed.
+        @param domain : domain in which the topology is defined
+        @param shape : mpi process grid resolution (MUST be of
+        the same size as domain), set shape[i] = 1 if
+        you don't want to cut the domain in direction i.
+        @param discretization : global mesh resolution and ghost layer.
+        @param periods : periodicity in each direction, default = true.
+        """
+
+        shape = np.asarray(shape, dtype=PARMES_INDEX)
+        assert shape.size == domain.dimension, 'Array for topo \
+            resolution must be of size domain.dimension'
+        dim = shape[shape != 1].size
+        assert shape.all(), "Topology error : you try to \
+            create a topology with 0 resolution in one direction."
+        if dim < domain.dimension:
+            shape.sort()
+            if ORDER == 'C':
+                shape[:] = shape[::-1]
+        cutdir = shape != 1
+
+        return cls(domain, dim, discretization, cutdir=cutdir,
+                   periods=periods, shape=shape,
+                   mpi_params=mpi_params)
+
+    @classmethod
+    def withResolutionFixed(cls, domain, shape, discretization,
+                            mpi_params, periods=None):
+        """
+        Compute a topology for a given shape of the process grid,
+        without reordering shape.
+        @param domain : domain in which the topology is defined
+        @param shape : mpi process grid resolution (MUST be of
+        the same size as domain), set shape[i] = 1 if
+        you don't want to cut the domain in direction i.
+        @param periods : periodicity in each direction, default = true.
+        """
+        shape = np.asarray(shape, dtype=PARMES_INDEX)
+        assert shape.size == domain.dimension, 'Array for topo \
+            resolution must be of size domain.dimension'
+        dim = shape[shape != 1].size
+        assert shape.all(), "Topology error : you try to \
+            create a topology with 0 resolution in one direction."
+        cutdir = shape != 1
+        return cls(domain, dim, discretization, mpi_params=mpi_params,
+                   cutdir=cutdir, periods=periods, shape=shape)
+
+    @classmethod
+    def withCutdir(cls, domain, cutdir, discretization,
+                   mpi_params, periods=None):
+        """
+        Compute a topology from a list of directions to be cut.
+        @param domain : domain in which the topology is defined
+        @param cutdir : array of bool, cutdir[i] = true if mesh must be
+        distributed in direction i
+        (cutdir must be of the same size as the domain).
+        @param periods : periodicity in each direction, default = true.
+        """
+        cutdir = np.asarray(cutdir, dtype=np.bool)
+        dim = cutdir[cutdir].size
+        return cls(domain, dim, discretization, mpi_params=mpi_params,
+                   cutdir=cutdir, periods=periods)
+
+    @classmethod
+    def withPrecomputedResolution(cls, domain, shape, discretization,
+                                  localres, offset, mpi_params,
+                                  periods=None):
+        """
+        Compute a topology for a given shape of the process grid
+        and a given local mesh resolution/offset (example : output from
+        fftw init process).
+        @param domain : domain in which the topology is defined
+        @param shape : mpi process grid resolution (MUST be of
+        the same size as domain), set shape[i] = 1 if
+        you don't want to cut the domain in direction i.
+        @param localres : local mesh resolution
+        @param offset : local offset (i.e. global index of the first local
+        mesh point)
+        @param periods : periodicity in each direction, default = true.
+        """
+        shape = np.asarray(shape, dtype=PARMES_INDEX)
+        assert shape.size == domain.dimension, 'Array for topo \
+            resolution must be of size domain.dimension'
+        dim = shape[shape != 1].size
+        assert shape.all(), "Topology error : you try to \
+            create a topology with 0 resolution in one direction."
+        cutdir = shape != 1
+
+        return cls(domain, dim, discretization, mpi_params=mpi_params,
+                   precomputed=True, localres=localres, localoffset=offset,
+                   periods=periods, cutdir=cutdir, shape=shape)
+
+    def __eq__(self, other):
+        """
+        Operator for comparison. Based on:
+        @li globalMeshResolution
+        @li localMeshResolution
+        @li shape
+        @li domain
+        @param other : Topology to compare with.
+        @todo complete the test in parmepy/mpi/tests/test_topology.py
+        """
+        return np.equal(self.globalMeshResolution,
+                        other.globalMeshResolution).all() and \
+            np.equal(self.localGridResolution,
+                     other.localGridResolution).all() and \
+            np.equal(self.shape, other.shape).all() and \
+            np.equal(self.ghosts, other.ghosts).all() and \
+            self.domain == other.domain
+
+    def __ne__(self, other):
+        """
+        Not equal operator.
+        Seems to be required in addition to __eq__ to
+        avoid 'corner-case' behaviors.
+        @param other : Topology to compare with.
+        """
+        result = self.__eq__(other)
+        if result is NotImplemented:
+            return result
+        return not result
+
+    def __str__(self):
+        """ Topology info display """
+        s = '======== Topology id : ' + str(self.__id) + ' ========\n'
+        if self.rank == 0:
+            s += str(self.dim) + 'D cartesian Topology of size ' \
+                + str(self.shape) + '\n'
+        s += 'Process of coordinates ' + str(self.proc_coords[:])
+        s += ' and of ranks (topo/origin) ' + str(self.rank) + '/'
+        s += str(self._mpis.rank) + '.\n'
+        s += 'Neighbours coordinates : \n'
+        s += str(self.neighbours) + '\n'
+        s += 'Ghost layer : ' + str(self.ghosts[:]) + '\n'
+        s += 'Global mesh resolution : ' + str(self.globalMeshResolution)
+        s += '\n'
+        s += str(self.mesh)
+        s += '\n=================================\n'
+        return s
+
+    def hasGhosts(self):
+        """
+        True if ghost layer length is not zero.
+        """
+        return not np.all(self.ghosts == 0)
+
+    def getId(self):
+        """
+        @return the id of the present topology.
+        This id is unique among all defined topologies.
+        """
+        return self.__id
+
+    def toIndexLocal(self, glob_index):
+        """
+        Return the local mesh indices from
+        their global indices.
+        Ilocal = Iglobal - G_start + ghost
+        @param glob_index an array of size domain.dim*2
+        with glob_index = [start_dir1, end_dir1, start_dir2, end_dir2, ...]
+        @return an array of indices, with the same layout as glob_index.
+        """
+        ilocal = np.asarray(glob_index).copy()
+        ilocal[0::2] = ilocal[0::2] - self.G_start + self.ghosts
+        ilocal[1::2] = ilocal[1::2] - self.G_start + self.ghosts
+        return ilocal
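+
+    # A worked example of the index shift above (hypothetical values) :
+    # with G_start = [8, 0] and ghosts = [2, 2], the global block
+    # glob_index = [8, 16, 0, 4] maps to
+    # [8 - 8 + 2, 16 - 8 + 2, 0 - 0 + 2, 4 - 0 + 2] = [2, 10, 2, 6].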
+
+    def toIndexLocalFull(self, sl):
+        """
+        Return the local mesh indices from
+        their global indices, clamped to the full local mesh
+        (ghost points included).
+        Ilocal = Iglobal - G_start + ghost
+        @param sl : a list of slices (one per direction) in global
+        index notation.
+        @return the corresponding list of slices in local notation.
+        """
+        shift = self.mesh.local_start - self.mesh.global_start
+        imax = self.mesh.local_end + self.ghosts
+        return [slice(max(sl[i].start + shift[i], 0),
+                      min(sl[i].stop + shift[i], imax[i]))
+                for i in xrange(self.domain.dimension)]
+
+    def toIndexLocal2(self, sl):
+        """
+        Return the local mesh indices from
+        their global indices, restricted to the local
+        computational points (ghost points excluded).
+        Ilocal = Iglobal - G_start + ghost
+        @param sl : a list of slices (one per direction) in global
+        index notation.
+        @return the corresponding list of slices in local notation.
+        """
+        shift = self.mesh.local_start - self.mesh.global_start
+        imax = self.mesh.local_end + 1
+        imin = self.mesh.local_start
+        return [slice(max(sl[i].start + shift[i], imin[i]),
+                      min(sl[i].stop + shift[i], imax[i]))
+                for i in xrange(self.domain.dimension)]
+
+    def toIndexGlobal(self, sl):
+        """
+        Return the global mesh indices from
+        their local indices.
+        Iglobal = Ilocal + G_start - ghost
+        @param sl : a list of slices (one per direction) in local
+        index notation.
+        @return the corresponding list of slices in global notation.
+        """
+        shift = self.mesh.global_start - self.mesh.local_start
+        return [slice(sl[i].start + shift[i], sl[i].stop + shift[i])
+                for i in xrange(self.domain.dimension)]
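+
+    # A worked example of the inverse shift (hypothetical values) :
+    # with global_start = [8, 0] and local_start = [2, 2],
+    # shift = [6, -2], so the local slices
+    # [slice(2, 10), slice(2, 6)] map to [slice(8, 16), slice(0, 4)].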
+
+
+class topotools(object):
+
+    @staticmethod
+    def collectGlobalIndices(topo, comm):
+        """
+        Get global start/end indices on
+        the current process for the local mesh
+        @param topo : the concerned topology
+        @param comm : a communicator that will be
+        used to gather the global indices. Usually
+        the parent of topo.
+        @return iglob : a list of arrays.
+        iglob[rk] = [start_0, end_0, start_1, end_1...]
+        start_i/end_i = global start/end indices for mesh of
+        process number rk in comm.
+        """
+        size = topo.size
+        start = topo.mesh.global_start
+        end = topo.mesh.global_end
+        # communicator that owns the topology
+        rank = comm.Get_rank()
+        iglob = np.zeros((size, topo.domain.dimension * 2), dtype=np.int32)
+        iglob[rank, 0::2] = start
+        iglob[rank, 1::2] = end
+
+        comm.Allgather([iglob[rank, :], MPI.INT], [iglob, MPI.INT])
+        return iglob
diff --git a/HySoP/hysop/mpi/topology.py b/HySoP/hysop/mpi/topology.py
index b405773bd79e364a55655ceaabf8af50b84d3cf6..6b28e506083c0975ca684ed9808c0d8db9cb9d93 100644
--- a/HySoP/hysop/mpi/topology.py
+++ b/HySoP/hysop/mpi/topology.py
@@ -1,214 +1,156 @@
 """
 @file topology.py
-
-Parmes topologies definitions and related tools.
-A Parmes Topology is defined as the association of
-a mpi process distribution (mpi topology) and of a set of local meshes
-(one per process).
-
-At the time, only cartesian topologies with cartesian meshes are available.
-
-This module provides the following classes :
-- Cartesian : 1,2 or 3D parmes topology.
-- Bridge : bridge between two parmes topologies : i.e. what must be
-exchanged between two topologies and how.
-
-To get more details try :
-\code
->> import parmepy.mpi.topology as topo
->> help(topo.Cartesian)
->> help(topo.Bridge)
-\endcode
+Tools and definitions for parmes topologies
+(MPI Processes layout + space discretization)
 
 """
 
-from parmepy.constants import ORDER, np, PARMES_INDEX, debug, PARMES_INTEGER
-from parmepy.mpi.mesh import SubMesh
+from parmepy.constants import debug, ORDER, PERIODIC
+from parmepy.domain.mesh import Mesh
 from itertools import count
 from parmepy.mpi.main_var import MPI
+from parmepy.tools.parameters import Discretization, MPI_params
+import numpy as np
+import parmepy.tools.numpywrappers as npw
+from parmepy.tools.misc import utils
 
 
 class Cartesian(object):
     """
-    Define a MPI cartesian topology with an associated
-    mesh for each sub-domain.
-    Keyword arguments :
-    - domain : the geometry; it must be a box.
-    - globalMeshResolution : user input. Number of points in the domain
-    in each direction. We assume that first point corresponds to origin, and
-    last point to boundary point, whatever the boundary type is.
-    That is x[0] = domain.origin and
-    x[globalMeshResolution-1] = domain.Lengths_x.
-    - dim : dimension of the topology
-    - shape : topology resolution (i.e process layout in each direction).
-    - comm : MPI communicator used to create this topology
-    - periods : periodicity of the topology (in each direction)
-    - ghosts : number of points in the ghost layer
+    A Parmes Topology is defined as the association of
+    a mpi process distribution (mpi topology) and of a set of local meshes
+    (one per process).
+
+    For now, only cartesian topologies with cartesian meshes are available.
+
+    Example :
+    \code
+    >>> from parmepy.mpi.topology import Cartesian
+    >>> from parmepy.tools.parameters import Discretization
+    >>> from parmepy.domain.box import Box
+    >>> dom = Box()
+    >>> r = Discretization([33, 33, 33])
+    >>> topo = Cartesian(dom, dim=2, discretization=r)
+    >>>
+    \endcode
+    For details about topologies see the Parmes User Manual.
+
+    You can also find examples of topology instantiation in test_topology.py.
 
     """
 
     @debug
     def __new__(cls, *args, **kw):
         return object.__new__(cls, *args, **kw)
-
+
     # Counter of topology.Cartesian instances to set a unique id for each
     # Cartesian topology instance.
     __ids = count(0)
 
     @debug
-    def __init__(self, domain, dim, globalMeshResolution,
-                 comm=None, periods=None, ghosts=None, cutdir=None,
-                 shape=None,
-                 precomputed=False, localres=None, localoffset=None):
-
-        assert domain.dimension == globalMeshResolution.__len__(), \
-            'The resolution size differs from the domain dimension.'
+    def __init__(self, domain, discretization, dim=None, mpi_params=None,
+                 isperiodic=None, cutdir=None, shape=None, mesh=None):
+        """
+        Required parameters : domain, discretization
+        Others are optional. You must choose one and only one param
+        among dim, cutdir and shape.
+
+        See parmepy.mpi.topology.Cartesian.plane_precomputed
+        for details on how to build a plane topology from a given
+        local discretization (e.g. from fftw or scales precomputation).
+        @param domain : the geometry; it must be a box.
+        @param discretization : a parmepy.tools.parameters.Discretization
+        with:
+        - resolution = Number of points in the domain
+        in each direction. We assume that first point corresponds
+        to origin, and last point to boundary point,
+        whatever the boundary type is.
+        That is x[0] = domain.origin and
+        x[Discretization.resolution-1] = domain.Lengths_x.
+        - ghosts =  number of points in the ghost layer
+        @param dim : dimension of the topology
+        @param mpi_params : a parmepy.tools.parameters.MPI_params, with:
+        - comm : MPI communicator used to create this topology
+         (default = main_comm)
+        - task_id : id of the task that owns this topology.
+        @param isperiodic : periodicity of the topology (in each direction)
+        @param cutdir : array of bool, set cutdir[dir] = True if you want
+        to distribute data through direction dir.
+        @param shape : topology resolution
+        (i.e process layout in each direction).
+        @param mesh : a predefined parmepy.domain.mesh.Mesh
+        """
+        # ===== 1 - Required parameters : domain and mpi (comm, task) ====
+        # An id for the topology
+        self.__id = self.__ids.next()
 
         ## Associated domain
         self.domain = domain
-        ## An id for the topology
-        self.__id = self.__ids.next()
+        # - MPI topo params :
+        if mpi_params is None:
+            mpi_params = MPI_params(comm=domain.comm_task,
+                                    task_id=domain.currentTask())
+
+        ## mpi parameters : Communicator used to build the topology
+        ## and task_id
+        self._mpis = mpi_params
+        # Each topo must be associated with one and only one task
+        assert self._mpis.task_id is not None
+        # ===== 2 - Prepare MPI processes layout ====
+
+        # 3 methods :
+        # (a) - from 'shape' parameter : we choose the layout explicitly.
+        # (b) - from 'cutdir' parameter: we choose the directions
+        # to split and let MPI fix the number of processes in each dir.
+        # (c) - from dimension of the topology ==> let MPI
+        # choose the 'best' layout.
+
+        ## Layout of the grid of mpi processes.
+        self.shape = None
         ## Dim of the cartesian grid for MPI processes.
-        self.dim = dim
-        # Mind the sequential case : dim from withConstr may be equal
-        # to 0 if shape[:] = 1
-        ## (Source) Communicator used to build the topology
-        if comm is None:
-            from parmepy.mpi.main_var import main_comm as comm
-        self._comm_origin = comm
-        ## number of process in comm_origin
-        self._origin_size = self._comm_origin.Get_size()
-        ## True if everything has been properly set for this topology
-        self.isUpToDate = False
-        ## Grid of mpi process layout
-        self.shape = np.ones(self.domain.dimension, dtype=PARMES_INDEX)
-        # MPI processes layout.
-        if shape is None:
-            # If shape is not provided, computation of  the "optimal" processus
-            # distribution for each direction of the grid topology.
-            shape = np.asarray(MPI.Compute_dims(self._origin_size, self.dim),
-                               dtype=PARMES_INDEX)
-
-            if cutdir is not None:
-                self.cutdir = np.array(cutdir, dtype=np.bool)
-                self.shape[self.cutdir] = shape
-            else:
-                # Reorder shape according to the data layout
-                # if arrays are in "fortran" order (column major)
-                # the last dir has priority for distribution.
-                # For C-like (row major) arrays, first dir is the
-                # first to be distributed, and so on.
-                self.shape[:self.dim] = shape
-                self.shape.sort()
-                if(ORDER == 'C'):
-                    self.shape[:] = self.shape[::-1]
-                self.cutdir = self.shape != 1
-            # redim if necessary ...
-            self.dim = self.shape[self.cutdir].size
+        self.dimension = None
+        # MPI grid periodicity
+        self._isperiodic = None
+        # directions where data are distributed
+        self.cutdir = None
+        ## mpi communicator (cartesian topo)
+        self.comm = None
 
-        else:
-            shape = np.asarray(shape)
-            assert shape.size == self.domain.dimension, 'Input shape must be \
-                of the same size as the domain dimension.'
-            if cutdir is None:
-                self.cutdir = shape != 1
-            else:
-                self.cutdir = cutdir
-
-            self.shape = shape
-            self.dim = self.shape[self.cutdir].size
-
-        # Special care for the 1 process case:
-        if self._origin_size == 1:
-            self.dim = 1
-            self.cutdir = shape != 1
-            self.cutdir[-1] = True
-
-        ## MPI process grid periodicity; default is true.
-        self.periods = np.ones((self.dim), dtype=np.bool)
-        if(periods is not None):
-            assert (periods.size == self.dim)
-            self.periods[:] = periods[:]
-        self.comm = self._comm_origin.Create_cart(self.shape[self.cutdir],
-                                                  periods=self.periods,
-                                                  reorder=True)
+        self._build_mpi_topo(shape, cutdir, dim, isperiodic)
 
+        # ===== 3 - Get features of the mpi processes grid =====
         ## Size of the topology (i.e. total number of mpi processes)
         self.size = self.comm.Get_size()
         ## Rank of the current process in the topology
         self.rank = self.comm.Get_rank()
         ## Coordinates of the current process
-        self.reduced_coords = np.asarray(self.comm.Get_coords(self.rank),
-                                         dtype=PARMES_INDEX)
+        reduced_coords = npw.asdimarray(self.comm.Get_coords(self.rank))
+
         ## Coordinates of the current process
         ## What is different between proc_coords and reduced_coords?
         ## --> proc_coords has values even for directions that
         ## are not distributed. If cutdir = [False, False, True]
         ## then reduced_coords = [ nx ] and proc_coords = [0, 0, nx]
-        self.proc_coords = np.zeros(self.domain.dimension, dtype=PARMES_INDEX)
-        self.proc_coords[self.cutdir] = self.reduced_coords
+        self.proc_coords = npw.dim_zeros(self.domain.dimension)
+        self.proc_coords[self.cutdir] = reduced_coords
         ## Neighbours : self.neighbours[0,i] (resp. [1,i])
         ## previous (resp. next) neighbour in direction i
         ## (warning : direction in the grid of process).
-        self.neighbours = np.zeros((2, self.dim), dtype=PARMES_INTEGER)
-        for direction in range(self.dim):
+        self.neighbours = npw.dim_zeros((2, self.dimension))
+        for direction in range(self.dimension):
             self.neighbours[:, direction] = self.comm.Shift(direction, 1)
 
-        ## ghost layer (default = 0)
-        if(ghosts is None):
-            self.ghosts = np.zeros((self.domain.dimension),
-                                   dtype=PARMES_INDEX)
-        else:
-            self.ghosts = np.asarray(ghosts, dtype=PARMES_INTEGER)
-            assert(self.ghosts.size == domain.dimension)
-        assert(np.all(self.ghosts >=0))
-        self.hasGhosts = not np.all(self.ghosts == 0)
-
-        # --- Computation of the local mesh resolution and indices ---
-        ##  Resolution of the global mesh
-        self.globalMeshResolution = np.asarray(globalMeshResolution,
-                                               dtype=PARMES_INDEX)
-
-        # If local resolution has already been computed
-        # (for example from fftw init output)
-        if precomputed:
-            self.G_start = localoffset
-            self.localGridResolution = localres
-
-        # Usual case : we compute local resolution/offset
-        else:
-            # Number of "computed" points (i.e. excluding ghosts/boundaries).
-            pts_noghost = np.zeros((self.domain.dimension), dtype=PARMES_INDEX)
-            # Warning FP : we should test boundary conditions type here
-            # If periodic, resol_calc = (globalMeshResolution - 1)
-            # else, resol_calc =  (globalMeshResolution - 2)
-            resolCalc = np.zeros((domain.dimension), dtype=PARMES_INDEX)
-            resolCalc[:] = self.globalMeshResolution[:] - 1
-            pts_noghost[:] = resolCalc // self.shape
-
-            # If any, remaining points are
-            # added on the mesh of the last process.
-            remainingPts = np.zeros(self.domain.dimension, dtype=PARMES_INDEX)
-            remainingPts[:] = resolCalc % self.shape
-
-            # Then the total number of points (size of arrays to be allocated)
-            nbpoints = pts_noghost.copy()
-            for i in range(self.domain.dimension):
-                if(self.proc_coords[i] == self.shape[i] - 1):
-                    nbpoints[i] += remainingPts[i]
-
-            self.localGridResolution = resolCalc.copy()
-            self.localGridResolution[:] = nbpoints[:]
-            self.localGridResolution[:] += 2 * self.ghosts[:]
-
-            ## Global indices for the local mesh points;
-            ## self.G_start[i] is the index of the first
-            ## point in the i direction (It includes ghosts/boundary points).
-            self.G_start = np.zeros((domain.dimension), dtype=PARMES_INDEX)
-            self.G_start[:] = self.proc_coords[:] * pts_noghost[:]
+        # ===== 4 - Computation of the local meshes =====
 
         ## Local mesh on the current mpi process.
-        self.mesh = None
+        # mesh from external function (e.g. fftw, scales ...)
+        self.mesh = mesh
+        # If mesh is None, we must compute local resolution and other
+        # parameters, using discretization.
+        if mesh is None:
+            self._computeMesh(discretization)
+
+        # ===== 5 - Final setup ====
 
         # The topology is register into domain list.
         # If it already exists (in the sense of the comparison operator
@@ -216,151 +158,222 @@ class Cartesian(object):
         # and the present instance will be destroyed.
         self.isNew = True
         self.__id = self.domain.register(self)
+        # If a similar topology (in the sense of operator
+        # equal defined below) exists, we link
+        # its arg with those of the current topo.
+        # It seems to be impossible to delete the present
+        # object in its __init__ function.
+        if not self.isNew:
+            topo = self.domain.topologies[self.__id]
+            self.mesh = topo.mesh
+            self.comm = topo.comm
+
+    def _build_mpi_topo(self, shape, cutdir, dim, isperiodic):
+        """
+        Build mpi topology
+        """
+        # number of process in parent comm
+        origin_size = self._mpis.comm.Get_size()
+
+        if shape is not None:
+            # method (a)
+            msg = ' parameter is useless when shape is provided.'
+            assert cutdir is None, 'cutdir ' + msg
+            assert dim is None, 'dim ' + msg
+            self.shape = npw.asdimarray(shape)
+            msg = 'Input shape must be of '
+            msg += 'the same size as the domain dimension.'
+            assert self.shape.size == self.domain.dimension, msg
+            self.cutdir = self.shape != 1
+
+        elif cutdir is not None:
+            # method (b)
+            msg = ' parameter is useless when cutdir is provided.'
+            assert shape is None, 'shape ' + msg
+            assert dim is None, 'dim ' + msg
+            self.cutdir = npw.asboolarray(cutdir)
+            self.dimension = self.cutdir[self.cutdir].size
+            shape = npw.asdimarray(MPI.Compute_dims(origin_size,
+                                                    self.dimension))
+            self.optimizeshape(shape)
+            self.shape = npw.dim_ones(self.domain.dimension)
+            self.shape[self.cutdir] = shape
+
+        else:
+            if dim is not None:
+                # method (c)
+                msg = ' parameter is useless when dim is provided.'
+                assert shape is None, 'shape ' + msg
+                assert cutdir is None, 'cutdir ' + msg
+                self.dimension = dim
+            else:
+                # dim, shape and cutdir are None ...
+                # ==> default behavior is let MPI compute
+                # the best layout for a topology of the
+                # same dim as the domain
+                self.dimension = self.domain.dimension
+
+            # Since shape is not provided, compute the "optimal"
+            # process distribution in each direction
+            # of the grid topology.
+            shape = npw.asdimarray(MPI.Compute_dims(origin_size,
+                                                    self.dimension))
+            self.shape = npw.dim_ones(self.domain.dimension)
+            # Reorder shape according to the data layout
+            # if arrays are in "fortran" order (column major)
+            # the last dir has priority for distribution.
+            # For C-like (row major) arrays, first dir is the
+            # first to be distributed, and so on.
+            self.shape[:self.dimension] = shape
+            self.optimizeshape(self.shape)
+            self.cutdir = self.shape != 1
+
+        ## MPI processes grid periodicity. Default is true.
+        if isperiodic is None:
+            self._isperiodic = npw.ones((self.domain.dimension),
+                                        dtype=npw.bool)
+        else:
+            self._isperiodic = npw.asboolarray(isperiodic)
+            assert self._isperiodic.size == self.domain.dimension
+
+        # compute real dim ...
+        self.dimension = self.shape[self.cutdir].size
+
+        # Special care for the 1 process case:
+        if origin_size == 1:
+            self.dimension = 1
+            self.cutdir[-1] = True
+
+        # From this point, the following parameters must be properly set:
+        # - self.shape
+        # - self.cutdir
+        # - self._isperiodic
+        # Then, we can create the mpi topology.
+        self.comm = \
+            self._mpis.comm.Create_cart(self.shape[self.cutdir],
+                                        periods=self._isperiodic[self.cutdir],
+                                        reorder=True)
+
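+    # The three layout choices above, sketched for a 3D domain on
+    # 8 processes (dom and d are hypothetical Box/Discretization objects):
+    #   Cartesian(dom, d, shape=[2, 2, 2])             # (a) explicit
+    #   Cartesian(dom, d, cutdir=[False, True, True])  # (b) split y and z
+    #   Cartesian(dom, d, dim=2)                       # (c) MPI chooses
+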
+    @staticmethod
+    def optimizeshape(shape):
+        """
+        Reorder 'shape' according to the chosen
+        data layout to optimize data distribution.
+        """
+        shape.sort()
+        if ORDER == 'C':
+            shape[:] = shape[::-1]
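+
+    # A quick illustration (hypothetical shape) : with shape = [4, 1, 2],
+    # sort gives [1, 2, 4] (fortran order : the last dir is the most
+    # distributed); for C order the array is reversed to [4, 2, 1] so
+    # that the first dir is the most distributed one.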
 
     def parent(self):
         """
         returns the communicator used to build this topology
         """
-        return self._comm_origin
+        return self._mpis.comm
 
-    @debug
-    def setUp(self):
-        """Topology set up.
+    def ghosts(self):
+        """
+        Get ghost layer width.
+        """
+        return self.mesh.discretization.ghosts
 
-        Create topology's Local Mesh.
-        This function is called during registration in the domain
-        (domain.register(topo)).
+    def task_id(self):
+        """
+        @return id of the task that owns this topology
         """
-        if not self.isUpToDate:
-            self.mesh = SubMesh(self, self.G_start, self.localGridResolution)
-        self.isUpToDate = True
+        return self._mpis.task_id
 
     @classmethod
-    def withResolution(cls, domain, shape,
-                       globalMeshResolution, ghosts=None, periods=None,
-                       comm=None):
-        """
-        Compute a topology for a given shape of the processus grid.
-        If the dimension of the topology is smaller than the domain dimension,
-        shape will be reordered according to the following rule :
-        (depends on array storage type (C or Fortran))
-        - if C : first dir, first distributed.
-        - if Fortran : last dir, first distributed.
-        @param domain in which the topology is defined
-        @param topoResolution : mpi process grid resolution (MUST be of
-        the same size as domain), set topoResolution[i] = 1 if
-        you don't want to cut the domain in direction i.
-        @param ghosts : number of ghost points in each direction, default = 1.
-        @param periods : periodicity in each direction, default = true.
-        """
-
-        shape = np.asarray(shape, dtype=PARMES_INDEX)
-        assert shape.size == domain.dimension, 'Array for topo \
-            resolution must be of size domain.dimension'
-        dim = shape[shape != 1].size
-        assert shape.all(), "Topology error : you try to \
-            create a topology with 0 resolution in one direction."
-        if dim < domain.dimension:
-            shape.sort()
-            if(ORDER == 'C'):
-                shape[:] = shape[::-1]
-        cutdir = shape != 1
-
-        return cls(domain, dim, globalMeshResolution, cutdir=cutdir,
-                   periods=periods, ghosts=ghosts, shape=shape,
-                   comm=comm)
+    def plane_precomputed(cls, localres, global_start, cdir=None, **kwds):
+        """
+        Define a 'plane' (1D) topology for a given mesh resolution.
+        This function is to be used when topo/discretization features
+        come from an external routine (e.g. scales or fftw)
+        @param localres : local mesh resolution
+        @param global_start : global indices of the lowest point
+        of the local mesh
+        @param cdir : direction of cutting (i.e. normal to mpi planes)
+        default = last if fortran order, first if C order.
+        """
+        msg = 'parameter is not required for plane_precomputed'
+        msg += ' topology construction.'
+        assert 'dim' not in kwds, 'dim ' + msg
+        assert 'shape' not in kwds, 'shape ' + msg
+        assert 'cutdir' not in kwds, 'cutdir ' + msg
+        # Local mesh :
+        global_start = npw.asdimarray(global_start)
+        localres = npw.asdimarray(localres)
+        mesh = Mesh(kwds['domain'], kwds['discretization'],
+                    localres, global_start)
+        # MPI layout
+        domain = kwds['domain']
+        cutdir = npw.zeros(domain.dimension, dtype=npw.bool)
+
+        if cdir is not None:
+            cutdir[cdir] = True
+        else:
+            if ORDER == 'C':
+                cutdir[0] = True
+            else:
+                cutdir[-1] = True
 
-    @classmethod
-    def withResolutionFixed(cls, domain, shape,
-                            globalMeshResolution, ghosts=None, periods=None,
-                            comm=None):
-        """
-        Compute a topology for a given shape of the processus grid.
-        @param domain in which the topology is defined
-        @param topoResolution : mpi process grid resolution (MUST be of
-        the same size as domain), set topoResolution[i] = 1 if
-        you don't want to cut the domain in direction i.
-        @param ghosts : number of ghost points in each direction, default = 1.
-        @param periods : periodicity in each direction, default = true.
-        """
-        shape = np.asarray(shape, dtype=PARMES_INDEX)
-        assert shape.size == domain.dimension, 'Array for topo \
-            resolution must be of size domain.dimension'
-        dim = shape[shape != 1].size
-        assert shape.all(), "Topology error : you try to \
-            create a topology with 0 resolution in one direction."
-        cutdir = shape != 1
-        return cls(domain, dim, globalMeshResolution,
-                   cutdir=cutdir, periods=periods, ghosts=ghosts, shape=shape,
-                   comm=comm)
+        return cls(mesh=mesh, cutdir=cutdir, **kwds)
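+
+    # A minimal usage sketch (hypothetical values coming from an
+    # fftw-like routine that splits the last direction):
+    #   topo = Cartesian.plane_precomputed(localres, global_start,
+    #                                      domain=dom, discretization=d)
+    # where localres and global_start are given by the external routine
+    # and dom, d are the usual Box and Discretization objects.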
 
-    @classmethod
-    def withCutdir(cls, domain, cutdir,
-                   globalMeshResolution, ghosts=None, periods=None,
-                   comm=None):
-        """
-        Compute a topology from a list of directions to be cut.
-        @param domain in which the topology is defined
-        @param cutdir : array of bool, cutdir[i] = true if mesh must be
-        distributed in direction i.
-        (cutdir must be of the same size as the domain).
-        @param ghosts : number of ghost points in each direction, default = 1.
-        @param periods : periodicity in each direction, default = true.
-        """
-        cutdir = np.asarray(cutdir, dtype=np.bool)
-        dim = cutdir[cutdir].size
-        return cls(domain, dim, globalMeshResolution,
-                   cutdir=cutdir, periods=periods, ghosts=ghosts,
-                   comm=comm)
+    def _computeMesh(self, discretization):
+        assert isinstance(discretization, Discretization)
+        assert discretization.resolution.size == self.domain.dimension, \
+            'The resolution size differs from the domain dimension.'
+        # Number of "computed" points (i.e. excluding ghosts/boundaries).
+        pts_noghost = npw.dim_zeros((self.domain.dimension))
+        # Warning FP : we should test boundary conditions type here
+        # If periodic, resol_calc = (global_resolution - 1)
+        # else, resol_calc =  (global_resolution - 2)
+
+        is_periodic = len(np.where(self.domain.boundaries == PERIODIC)[0])\
+            == self.domain.dimension
+        if is_periodic:
+            resolCalc = discretization.resolution - 1
+        else:
+            raise AttributeError('Unknown boundary conditions.')
 
-    @classmethod
-    def withPrecomputedResolution(cls, domain, shape,
-                                  globalMeshResolution, localres,
-                                  offset, ghosts=None, periods=None,
-                                  comm=None):
-        """
-        Compute a topology for a given shape of the processus grid
-        and a given local mesh resolution/offset (example : output from
-        fftw init process).
-        @param domain in which the topology is defined
-        @param topoResolution : mpi process grid resolution (MUST be of
-        the same size as domain), set topoResolution[i] = 1 if
-        you don't want to cut the domain in direction i.
-        @param localres : local mesh resolution
-        @param offset : local offset (i.e. global index of first local mesh
-        points)
-        @param ghosts : number of ghost points in each direction, default = 1.
-        @param periods : periodicity in each direction, default = true.
-        """
-        shape = np.asarray(shape, dtype=PARMES_INDEX)
-        assert shape.size == domain.dimension, 'Array for topo \
-            resolution must be of size domain.dimension'
-        dim = shape[shape != 1].size
-        assert shape.all(), "Topology error : you try to \
-            create a topology with 0 resolution in one direction."
-        cutdir = shape != 1
-
-        return cls(domain, dim, globalMeshResolution,
-                   precomputed=True, localres=localres, localoffset=offset,
-                   periods=periods, ghosts=ghosts, cutdir=cutdir, shape=shape,
-                   comm=comm)
+        pts_noghost[:] = resolCalc // self.shape
+
+        # If any, remaining points are
+        # added on the mesh of the last process.
+        remainingPts = npw.dim_zeros(self.domain.dimension)
+        remainingPts[:] = resolCalc % self.shape
+
+        # Total number of points (size of arrays to be allocated)
+        nbpoints = pts_noghost.copy()
+        for i in range(self.domain.dimension):
+            if self.proc_coords[i] == self.shape[i] - 1:
+                nbpoints[i] += remainingPts[i]
+
+        local_resolution = resolCalc.copy()
+        local_resolution[:] = nbpoints[:]
+        local_resolution[:] += 2 * discretization.ghosts[:]
+
+        ## Global indices for the local mesh points
+        global_start = npw.dim_zeros((self.domain.dimension))
+        global_start[:] = self.proc_coords[:] * pts_noghost[:]
+
+        self.mesh = Mesh(self.domain, discretization,
+                         local_resolution, global_start)
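+
+    # A worked example of the distribution above (hypothetical values) :
+    # global resolution = 67 (periodic), shape = [4], ghosts = [1]
+    # => resolCalc = 66, pts_noghost = 16, remainingPts = 2.
+    # The last process gets 16 + 2 = 18 computational points, so
+    # local_resolution = 18 + 2 * 1 = 20 there and 18 elsewhere,
+    # with global_start = proc_coords * 16, i.e. 0, 16, 32 or 48.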
 
     def __eq__(self, other):
         """
         Operator for comparison. Based on:
-        @li globalMeshResolution
-        @li localMeshResolution
+        @li mesh
         @li shape
         @li domain
         @param other : Topology to compare with.
-        @todo completer le test dans parmepy/mpi/tests/test_topology.py
-        """
-        return np.equal(self.globalMeshResolution,
-                        other.globalMeshResolution).all() and \
-            np.equal(self.localGridResolution,
-                     other.localGridResolution).all() and \
-            np.equal(self.shape, other.shape).all() and \
-            np.equal(self.ghosts, other.ghosts).all() and \
+        """
+        if self.__class__ != other.__class__:
+            return False
+        return self.mesh == other.mesh and \
+            npw.equal(self.shape, other.shape).all() and \
             self.domain == other.domain
 
     def __ne__(self, other):
@@ -378,82 +391,253 @@ class Cartesian(object):
     def __str__(self):
         """ Topology info display """
         s = '======== Topology id : ' + str(self.__id) + ' ========\n'
-        if(self.rank == 0):
-            s += str(self.dim) + 'D cartesian Topology of size ' \
-                + str(self.shape) + '\n'
-        s += 'Process of coordinates ' + str(self.proc_coords[:])
+        s += ' - on task : ' + str(self.task_id()) + '\n'
+        s += ' - shape : ' + str(self.shape) + '\n'
+        s += ' - process of coordinates ' + str(self.proc_coords[:])
         s += ' and of ranks (topo/origin) ' + str(self.rank) + '/'
-        s += str(self._comm_origin.Get_rank()) + '.\n'
-        s += 'Neighbours coordinates : \n'
+        s += str(self._mpis.rank) + '.\n'
+        s += ' - neighbours coordinates : \n'
         s += str(self.neighbours) + '\n'
-        s += 'Ghost layer : ' + str(self.ghosts[:]) + '\n'
-        s += 'Global mesh resolution : ' + str(self.globalMeshResolution)
-        s += '\n'
         s += str(self.mesh)
         s += '\n=================================\n'
         return s
 
-    def getId(self):
+    def hasGhosts(self):
         """
-        @return the id of the present topology.
-        This id is unique among all defined topologies.
+        True if ghost layer length is not zero.
         """
-        return self.__id
+        return not np.all(self.mesh.discretization.ghosts == 0)
 
-    def toIndexLocal(self, glob_index):
+    def get_id(self):
         """
-        Return the local mesh indices from
-        their global indices.
-        Ilocal = Iglobal - G_start + ghost
-        @param glob_index an array of size domain.dim*2
-        with glob_index = [start_dir1, end_dir1, start_dir2, end_dir2, ...]
-        return an array of indices, same 'setup' as glob_index.
+        return the id of the present topology.
+        This id is unique among all defined topologies.
         """
-        ilocal = np.asarray(glob_index).copy()
-        ilocal[0::2] = ilocal[0::2] - self.G_start + self.ghosts
-        ilocal[1::2] = ilocal[1::2] - self.G_start + self.ghosts
-        return ilocal
+        return self.__id
+
+    def isConsistentWith(self, topo):
+        """
+        Return True if the given topology has the same shape and
+        the same parent communicator as the current one.
+        @param topo : the topology to compare with.
+        """
+        same_parent = self.parent() == topo.parent()
+        # Note FP. Is it really required to have the
+        # same parent? Inclusion of all proc may be enough?
+        return npw.equal(self.shape, topo.shape).all() and same_parent
 
-    def toIndexGlobal(self, sl):
+    def canCommunicateWith(self, target):
         """
-        Return the global mesh indices from
-        their local indices.
-        Iglobal = Ilocal + G_start - ghost
-        @param loc_index an array of size domain.dim*2
-        with loc_index = [start_dir1, end_dir1, start_dir2, end_dir2, ...]
-        return an array of indices, same 'setup' as loc_index.
+        Return True if the current topo is compliant with target.
+        (See below for what 'compliant' implies.)
+        @param target : the targeted topology
+
+        Compliant if :
+        - all processes of the current topo are in target
+        - both topologies belong to the same mpi task
         """
-        g_start = self.mesh.global_start
-        return [slice(sl[i].start + g_start[i], sl[i].stop + g_start[i])
-                for i in xrange(self.domain.dimension)]
+        if self == target:
+            return True
+        msg = 'You are trying to connect topologies that belong to'
+        msg += ' two different mpi tasks. Set task ids properly or use'
+        msg += ' InterBridge.'
+        assert self.task_id() == target.task_id(), msg
+
+        ## Parent communicator
+        ## Todo : define some proper conditions for compatibility
+        ## between topo_from, topo_to and parent:
+        ## - same size
+        ## - same domain
+        ## - common processes ...
+        ## For now we only check that both topologies have
+        ## the same parent communicator.
+        return self.isConsistentWith(target)
+
+    @staticmethod
+    def reset_counter():
+        """ Reset the topology id counter. """
+        Cartesian.__ids = count(0)
 
 
-class topotools():
+class topotools(object):
 
     @staticmethod
-    def collectGlobalIndices(topo, comm):
-        """
-        Get global start/end indices on
-        the current process for the local mesh
-        @param topo : the concerned topology
-        @param comm : a communicator that will be
-        used to gather the global indices. Usually
-        the parent of topo.
-        @return iglob : a list of arrays.
-        iglob[rk] = [start_0, end_0, start_1, end_1...]
-        start_i/end_i = global start/end indices for mesh of
-        process number rk in comm.
-        """
-        size = topo.size
-        start = topo.mesh.global_start
-        end = topo.mesh.global_end
+    def gatherGlobalIndices(topo, toslice=True, root=None, comm=None):
+        """
+        Collect global indices of local meshes on each process of topo
+        @param topo : a parmepy.mpi.topology.Cartesian
+        @param toslice : true  (default) if you want a dict of slice
+        as return value, false if you need a numpy array.
+        @param root : rank (in topo.parent()) of the gathering process.
+        If root is None (default) indices are gathered
+        on all processes of topo.
+        @param comm : communicator used for global communications.
+        Default = topo.parent().
+        @return : either :
+        - a dictionary which maps each rank number to
+        a list of slices such that res[rank][i] = a slice
+        defining the indices of the points of the local mesh,
+        in direction i, in global notation.
+        - a numpy array, each column corresponds to a rank number,
+        with column = [start_x, end_x, start_y, end_y ...]
+
+        Rank numbers are the process numbers in topo.parent().
+        """
+        if comm is None:
+            comm = topo.parent()
+        size = comm.size
+        start = topo.mesh.start()
+        end = topo.mesh.stop() - 1
         # communicator that owns the topology
         rank = comm.Get_rank()
-        iglob = np.zeros((size, topo.domain.dimension * 2), dtype=np.int32)
-        iglob[rank, 0::2] = start
-        iglob[rank, 1::2] = end
-        
-        comm.Allgather([iglob[rank, :], MPI.INT], [iglob, MPI.INT])
-        return iglob
-
-    
+        dimension = topo.domain.dimension
+        iglob = npw.int_zeros((dimension * 2, size))
+        iglob[0::2, rank] = start
+        iglob[1::2, rank] = end
+        # iglob is built as a numpy array and then transformed into
+        # a dict of slices since mpi send operations are much
+        # more efficient with numpy arrays.
+        if root is None:
+            comm.Allgather([iglob[:, rank], MPI.INT], [iglob, MPI.INT])
+        else:
+            comm.Gather([iglob[:, rank], MPI.INT], [iglob, MPI.INT], root=root)
+
+        if toslice:
+            return utils.arrayToDict(iglob)
+        else:
+            return iglob
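+
+    # A usage sketch (hypothetical call) :
+    #   ind = topotools.gatherGlobalIndices(topo)
+    # ind[rk][d] is then the slice of global mesh indices owned by
+    # process rk in direction d (ranks given in topo.parent()).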
+
+    @staticmethod
+    def gatherGlobalIndicesOverlap(topo=None, comm=None, dom=None,
+                                   toslice=True, root=None):
+        """
+        This function does the same thing as gatherGlobalIndices but
+        also works when topo is None.
+        It is useful if you need to collect global indices
+        on a topo defined only on a subset of comm:
+        for the processes outside this subset, topo is
+        equal to None. In such a case, comm and dom are required.
+        This may happen when you want to build a bridge between two
+        topologies that do not handle the same number of processes but
+        with an overlap between the two groups of processes.
+
+        In that case, a call to
+        gatherGlobalIndicesOverlap(topo, comm, dom)
+        will work on all processes belonging to comm, whether topo is
+        None or not.
+        The values corresponding to ranks not in topo are empty slices.
+        """
+        if topo is None:
+            assert comm is not None and dom is not None
+            size = comm.Get_size()
+            rank = comm.Get_rank()
+            dimension = dom.dimension
+            iglob = npw.int_zeros((dimension * 2, size))
+            iglob[1::2, rank] = -1
+            if root is None:
+                comm.Allgather([iglob[:, rank], MPI.INT], [iglob, MPI.INT])
+            else:
+                comm.Gather([iglob[:, rank], MPI.INT], [iglob, MPI.INT],
+                            root=root)
+            if toslice:
+                return utils.arrayToDict(iglob)
+            else:
+                return iglob
+
+        else:
+            return topotools.gatherGlobalIndices(topo, toslice, root, comm)
+
+    @staticmethod
+    def is_parent(child, parent):
+        """
+        Return true if all mpi processes of child belong to parent.
+        """
+        # Get the list of processes
+        assert child is not None
+        assert parent is not None
+        child_group = child.Get_group()
+        parent_group = parent.Get_group()
+        inter_group = MPI.Group.Intersect(child_group, parent_group)
+        return child_group.Get_size() == inter_group.Get_size()
+
+    @staticmethod
+    def intersection_size(comm_1, comm_2):
+        """
+        Return the number of processes common to comm_1 and comm_2,
+        or None if one of them is MPI.COMM_NULL.
+        """
+        if comm_1 == MPI.COMM_NULL or comm_2 == MPI.COMM_NULL:
+            return None
+        group_1 = comm_1.Get_group()
+        group_2 = comm_2.Get_group()
+        inter_group = MPI.Group.Intersect(group_1, group_2)
+        return inter_group.Get_size()
+
+    @staticmethod
+    def compare_comm(comm_1, comm_2):
+        """
+        Compare two mpi communicators.
+        Return true if the two communicators are handles for the same
+        group of processes and for the same communication context.
+        @param comm_1 : a mpi communicator
+        @param comm_2 : a mpi communicator
+        Warning : if comm_1 or comm_2 is invalid, the
+        function will fail.
+        """
+        assert comm_1 != MPI.COMM_NULL
+        assert comm_2 != MPI.COMM_NULL
+        return MPI.Comm.Compare(comm_1, comm_2) == MPI.IDENT
+
+    @staticmethod
+    def compare_groups(comm_1, comm_2):
+        """
+        Compare two mpi communicators.
+        Return true if each comm handles the
+        same group of mpi processes.
+        @param comm_1 : a mpi communicator
+        @param comm_2 : a mpi communicator
+        Warning : if comm_1 or comm_2 is invalid, the
+        function will fail.
+        """
+        assert comm_1 != MPI.COMM_NULL
+        assert comm_2 != MPI.COMM_NULL
+        result = MPI.Comm.Compare(comm_1, comm_2)
+        res = [MPI.IDENT, MPI.CONGRUENT, MPI.SIMILAR, MPI.UNEQUAL]
+        return result in res[:-1]
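+
+    # Note the difference between the two comparisons above : for a
+    # duplicated communicator (comm_2 = comm_1.Dup()), MPI.Comm.Compare
+    # returns MPI.CONGRUENT (same group, different context), so
+    # compare_comm(comm_1, comm_2) is False while
+    # compare_groups(comm_1, comm_2) is True.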
+
+    @staticmethod
+    def convert_ranks(source, target):
+        """
+        Find the ranks in target of the processes belonging to source.
+        @param source : a mpi communicator
+        @param target : a mpi communicator
+        @return a list 'ranks' such that ranks[i] = rank in target
+        of process of rank i in source.
+        """
+        assert source != MPI.COMM_NULL and target != MPI.COMM_NULL
+        g_source = source.Get_group()
+        g_target = target.Get_group()
+        size_source = g_source.Get_size()
+        r_source = [i for i in xrange(size_source)]
+        res = MPI.Group.Translate_ranks(g_source, r_source, g_target)
+        return {r_source[i]: res[i] for i in xrange(size_source)}
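+
+    # For example (hypothetical setup) : if target was built from source
+    # by excluding the process of rank 0, convert_ranks(source, target)
+    # returns {0: MPI.UNDEFINED, 1: 0, 2: 1, ...}, since
+    # Translate_ranks maps ranks that are absent from target
+    # to MPI.UNDEFINED.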
+
+    @staticmethod
+    def createSubArray(sl_dict, data_shape):
+        """
+        Create a MPI subarray mask to be used in send/recv operations
+        between some topologies.
+        @param[in] sl_dict : dictionary which contains mesh indices
+        of the subarray for each rank,
+        such that sl_dict[rk] = (slice(...), slice(...), ...)
+        @param[in] data_shape : shape (numpy-like) of the original array
+        @return : dictionary of MPI derived types.
+        Keys = ranks in parent communicator.
+        """
+        from parmepy.constants import PARMES_MPI_REAL, ORDERMPI
+        subtypes = {}
+        dim = len(data_shape)
+        for rk in sl_dict.keys():
+            subvshape = tuple((sl_dict[rk][i].stop -
+                               sl_dict[rk][i].start for i in xrange(dim)))
+            substart = tuple((sl_dict[rk][i].start for i in xrange(dim)))
+            subtypes[rk] = \
+                PARMES_MPI_REAL.Create_subarray(data_shape,
+                                                subvshape,
+                                                substart,
+                                                order=ORDERMPI)
+            subtypes[rk].Commit()
+
+        return subtypes
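+
+    # A usage sketch (hypothetical names) : the returned derived types
+    # can be passed directly to mpi4py point-to-point calls, e.g.
+    #   subtypes = topotools.createSubArray(sl_dict, data.shape)
+    #   comm.Send([data, subtypes[rk]], dest=rk)
+    # so that only the points selected by sl_dict[rk] are sent.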
diff --git a/HySoP/hysop/mpi/topology.pyc b/HySoP/hysop/mpi/topology.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..56176756a908e2c7c4e965d36c2ad8287a1ace19
Binary files /dev/null and b/HySoP/hysop/mpi/topology.pyc differ
diff --git a/HySoP/hysop/numerics/__init__.pyc b/HySoP/hysop/numerics/__init__.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6690beef5a02ed15b06932410a32ef1cb60469dd
Binary files /dev/null and b/HySoP/hysop/numerics/__init__.pyc differ
diff --git a/HySoP/hysop/numerics/differential_operations.py b/HySoP/hysop/numerics/differential_operations.py
index 184ee2eabbf6d18d96395e1a5031a2ffa15179ec..7561a27616f5b7d9ba69aad266af935d66460ff8 100755
--- a/HySoP/hysop/numerics/differential_operations.py
+++ b/HySoP/hysop/numerics/differential_operations.py
@@ -8,6 +8,7 @@ from parmepy.constants import debug, XDIR, YDIR, ZDIR
 from abc import ABCMeta, abstractmethod
 from parmepy.numerics.finite_differences import FD_C_4, FD_C_2, FD2_C_2
 import numpy as np
+import parmepy.tools.numpywrappers as npw
 
 
 class DifferentialOperation(object):
@@ -31,9 +32,10 @@ class DifferentialOperation(object):
     def getWorkLengths(nb_components=None, domain_dim=None, fd_method=None):
         """
         Compute the number of required work arrays for this method.
-        @param nb_components : number of components of the
+        @param nb_components : number of components of the fields
+        on which this method operates.
         @param domain_dim : dimension of the domain
-        fields on which this method operates.
+        @param fd_method : name of the finite differences method used.
         @return length of list of work arrays of reals.
         """
         assert nb_components is None
@@ -62,7 +64,7 @@ class Curl(DifferentialOperation):
             self.fcall = self.FDCentral
             # check ghosts ... must be updated when
             # other fd schemes will be implemented.
-            assert (topo.ghosts >= 2).all(),\
+            assert (topo.ghosts() >= 2).all(),\
                 'you need a ghost layer for FD4 scheme.'
         elif method is FD_C_2:
             # - 2nd ordered FD,
@@ -76,13 +78,17 @@ class Curl(DifferentialOperation):
             self.fcall = self.FDCentral
             # check ghosts ... must be updated when
             # other fd schemes will be implemented.
-            assert (topo.ghosts >= 1).all(),\
+            assert (topo.ghosts() >= 1).all(),\
                 'you need a ghost layer for FD2 scheme.'
 
         else:
             raise ValueError("FD scheme Not yet implemented")
 
         self._indices = topo.mesh.iCompute
+        if len(self._indices) == 2:
+            # a 'fake' curl for 2D case
+            self.fcall = self.FDCentral_2D
+
         # initialize fd scheme
         self.fd_scheme.computeIndices(self._indices)
 
@@ -120,6 +126,18 @@ class Curl(DifferentialOperation):
 
         return result
 
+    def FDCentral_2D(self, variable, result):
+        """
+        @param variable : input vector field
+        @param result : list of numpy arrays to save result
+        """
+        #-- d/dx vy in result[ZDIR]
+        self.fd_scheme.compute(variable[YDIR], XDIR, result[0])
+        # result_z = d/dx vy - d/dy vx
+        self.fd_scheme.compute(variable[XDIR], YDIR, self._work[0])
+        result[0][self._indices] -= self._work[0][self._indices]
+        return result
+
 
 class DivV(DifferentialOperation):
     """
@@ -160,7 +178,7 @@ class DivV(DifferentialOperation):
                 self.fcall = self.FDCentral4
             # check ghosts ... must be updated when
             # other fd schemes will be implemented.
-            assert (topo.ghosts >= 2).all(),\
+            assert (topo.ghosts() >= 2).all(),\
                 'you need a ghost layer for FD4 scheme.'
 
         else:
@@ -200,7 +218,7 @@ class DivV(DifferentialOperation):
 
         # _work[0:1] are used as temporary space
         # for computation
-        # div computations are accumulate into result.
+        # div computations are accumulated into result.
         # result does not need initialisation to zero.
 
         # d/dx (scal * var1x), saved into result
@@ -288,8 +306,6 @@ class DivWV(DifferentialOperation):
         return self.fcall(var1, var2, result)
 
     def FDCentral4(self, var1, var2, result):
-        """
-        """
         assert len(result) == len(var1)
 
         # Note FP var1[dir] and var2[dir] must be different from result.
@@ -338,13 +354,13 @@ class GradS(DifferentialOperation):
         if method is FD_C_4:
             self.fcall = self.FDCentral
             self.fd_scheme = FD_C_4(topo.mesh.space_step)
-            assert (topo.ghosts >= 2).all(),\
+            assert (topo.ghosts() >= 2).all(),\
                 'you need a ghost layer for FD4 scheme.'
 
         elif method is FD_C_2:
             self.fcall = self.FDCentral
             self.fd_scheme = FD_C_2(topo.mesh.space_step)
-            assert (topo.ghosts >= 1).all(),\
+            assert (topo.ghosts() >= 1).all(),\
                 'you need a ghost layer for FD2 scheme.'
 
         else:
@@ -415,7 +431,7 @@ class GradVxW(DifferentialOperation):
             # declare and create fd scheme
             self.fd_scheme = FD_C_4(topo.mesh.space_step)
             # connect to fd function call
-            assert (topo.ghosts >= 2).all(),\
+            assert (topo.ghosts() >= 2).all(),\
                 'you need a ghost layer for FD4 scheme.'
             self.fcall = self.FDCentral4_diag
         else:
@@ -442,8 +458,6 @@ class GradVxW(DifferentialOperation):
         return self.fcall(var1, var2, result, diagnostics)
 
     def FDCentral4_diag(self, var1, var2, result, diagnostics):
-        """
-        """
         assert len(result) == len(var1)
         nbc = len(var1)
         diagnostics[:] = 0.0
@@ -462,6 +476,6 @@ class GradVxW(DifferentialOperation):
                 # compute self._work = self._work.var2[cdir]
                 self._work[0][...] = self._work[0] * var2[cdir]
                 # sum to obtain nabla(var_comp) . var2, saved into result[comp]
-                np.add(result[comp], self._work[0], result[comp])
+                npw.add(result[comp], self._work[0], result[comp])
             diagnostics[1] = max(diagnostics[1], np.max(self._work[1]))
         return result, diagnostics
diff --git a/HySoP/hysop/numerics/integrators/odesolver.py b/HySoP/hysop/numerics/integrators/odesolver.py
index 21d2357ac453e55782cc680270b738f5c212bf48..57b078c605d56ab757505ea7123002a410305122 100644
--- a/HySoP/hysop/numerics/integrators/odesolver.py
+++ b/HySoP/hysop/numerics/integrators/odesolver.py
@@ -7,7 +7,7 @@ Abstract class for time integrators.
 from abc import ABCMeta, abstractmethod
 from parmepy.numerics.method import NumMethod
 from parmepy.constants import WITH_GUESS
-from parmepy.numerics.updateGhosts import UpdateGhosts
+from parmepy.numerics.update_ghosts import UpdateGhosts
 import parmepy.tools.numpywrappers as npw
 
 
diff --git a/HySoP/hysop/numerics/interpolation.py b/HySoP/hysop/numerics/interpolation.py
index 8a321125ca43986b6c19f08ca8b44f5e53be0d9c..55c59e5626832cfaa341719b35edd32fdc9ff4d3 100644
--- a/HySoP/hysop/numerics/interpolation.py
+++ b/HySoP/hysop/numerics/interpolation.py
@@ -1,17 +1,16 @@
 """
 @file interpolation.py
 """
-from parmepy.constants import np, PARMES_INDEX
+from parmepy.constants import np, PARMES_INTEGER, ORDER
 from parmepy.numerics.method import NumMethod
 
 
 class Linear(NumMethod):
     """Linear interpolation of a field"""
 
-    def __init__(self, f, d, topo, work, iwork):
+    def __init__(self, tab, direction, topo, work, iwork):
         """
-        @param f : Field to interpolate
-        @param d : Component of f to use
+        @param tab : numpy array to interpolate
+        @param direction : direction of interpolation
+        @param topo : the topology on which interpolation operates
         @param dx : space grid step
         @param origin : grid lower point coordinates
         @param work : Work arrays (floats)
@@ -25,28 +24,29 @@ class Linear(NumMethod):
         """
         NumMethod.__init__(self)
         self.name = 'LinearInterpolation'
-        self.field = f
-        self.dir = d
+        self.tab = tab
         self.topo = topo
-        self._dim = self.field.dimension
+        dimension = self.topo.domain.dimension
         self.work = work
+        for iw in iwork:
+            assert iw.dtype == PARMES_INTEGER
         self.iwork = iwork
         assert len(self.work) == 1
-        assert len(self.iwork) == self._dim
-
-        if self._dim == 3:
+        assert len(self.iwork) == dimension
+        self.dir = direction
+        if dimension == 3:
             if self.dir == 0:
                 self._affect_working_arrays = self._affect_work_3D_X
             if self.dir == 1:
                 self._affect_working_arrays = self._affect_work_3D_Y
             if self.dir == 2:
                 self._affect_working_arrays = self._affect_work_3D_Z
-        if self._dim == 2:
+        if dimension == 2:
             if self.dir == 0:
                 self._affect_working_arrays = self._affect_work_2D_X
             if self.dir == 1:
                 self._affect_working_arrays = self._affect_work_2D_Y
-        if self._dim == 1:
+        if dimension == 1:
             if self.dir == 0:
                 self._affect_working_arrays = self._affect_work_1D
 
@@ -58,45 +58,42 @@ class Linear(NumMethod):
         return (self.work[0], tuple(self.iwork))
 
     def _affect_work_2D_X(self, resol):
-        self.iwork[1][...] = np.indices((resol[1],))[0].astype(
-            PARMES_INDEX)[np.newaxis, :]
+        self.iwork[1][...] = np.indices((resol[1],))[0][np.newaxis, :]
         return (self.work[0], tuple(self.iwork))
 
     def _affect_work_2D_Y(self, resol):
-        self.iwork[0][...] = np.indices((resol[0],))[0].astype(
-            PARMES_INDEX)[:, np.newaxis]
+        self.iwork[0][...] = np.indices((resol[0],))[0][:, np.newaxis]
         return (self.work[0], tuple(self.iwork))
 
     def _affect_work_3D_X(self, resol):
-        self.iwork[1][...] = np.indices((resol[1],))[0].astype(
-            PARMES_INDEX)[np.newaxis, :, np.newaxis]
-        self.iwork[2][...] = np.indices((resol[2],))[0].astype(
-            PARMES_INDEX)[np.newaxis, np.newaxis, :]
+        self.iwork[1][...] = np.indices((resol[1],))[0][np.newaxis,
+                                                        :, np.newaxis]
+        self.iwork[2][...] = np.indices((resol[2],))[0][np.newaxis,
+                                                        np.newaxis, :]
         return (self.work[0], tuple(self.iwork))
 
     def _affect_work_3D_Y(self, resol):
-        self.iwork[0][...] = np.indices((resol[0],))[0].astype(
-            PARMES_INDEX)[:, np.newaxis, np.newaxis]
-        self.iwork[2][...] = np.indices((resol[2],))[0].astype(
-            PARMES_INDEX)[np.newaxis, np.newaxis, :]
+        self.iwork[0][...] = np.indices((resol[0],))[0][:,
+                                                        np.newaxis, np.newaxis]
+        self.iwork[2][...] = np.indices((resol[2],))[0][np.newaxis,
+                                                        np.newaxis, :]
         return (self.work[0], tuple(self.iwork))
 
     def _affect_work_3D_Z(self, resol):
-        self.iwork[0][...] = np.indices((resol[0],))[0].astype(
-            PARMES_INDEX)[:, np.newaxis, np.newaxis]
-        self.iwork[1][...] = np.indices((resol[1],))[0].astype(
-            PARMES_INDEX)[np.newaxis, :, np.newaxis]
+        self.iwork[0][...] = np.indices((resol[0],))[0][:,
+                                                        np.newaxis, np.newaxis]
+        self.iwork[1][...] = np.indices((resol[1],))[0][np.newaxis,
+                                                        :, np.newaxis]
         return (self.work[0], tuple(self.iwork))
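The _affect_work_* helpers above build, once per call, constant index arrays for the directions that are not interpolated, using np.indices plus np.newaxis broadcasting. A minimal standalone sketch of that trick, under a hypothetical 3-D resolution:

    import numpy as np

    resol = (4, 5, 6)
    # index values along y, broadcast to shape (1, 5, 1)
    iy = np.indices((resol[1],))[0][np.newaxis, :, np.newaxis]
    # index values along z, broadcast to shape (1, 1, 6)
    iz = np.indices((resol[2],))[0][np.newaxis, np.newaxis, :]
    # combined with a full (4, 5, 6) index array along x, the tuple
    # (ix, iy, iz) fancy-indexes a (4, 5, 6) array point by point
    ix = np.zeros(resol, dtype=int)  # stands in for the computed direction
    assert np.zeros(resol)[ix, iy, iz].shape == resol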
 
     def __call__(self, t, y, result):
         """
         Computational core for interpolation.
         """
-        topo = self.field.topology
-        origin = topo.domain.origin
-        mesh = topo.mesh
+        origin = self.topo.domain.origin
+        mesh = self.topo.mesh
         dx = mesh.space_step
-        resolution = topo.globalMeshResolution
+        resolution = mesh.discretization.resolution
         x = y[0]
         res = result[0]
         i_y, index = self._affect_working_arrays(mesh.resolution)
@@ -108,12 +105,14 @@ class Linear(NumMethod):
         i_y[...] -= floor
         # use res as the result (no more uses to floor variable)
 
-        index[self.dir][...] = floor.astype(PARMES_INDEX) \
-            % (resolution[self.dir] - 1)
-        res[...] = self.field.data[self.dir][index] * (1. - i_y)
+        index[self.dir][...] = np.asarray(
+            floor, dtype=PARMES_INTEGER,
+            order=ORDER) % (resolution[self.dir] - 1)
+
+        res[...] = self.tab[index] * (1. - i_y)
 
         index[self.dir][...] = (index[self.dir] + 1) \
             % (resolution[self.dir] - 1)
-        res[...] += self.field.data[self.dir][index] * i_y
+
+        res[...] += self.tab[index] * i_y
 
         return [res, ]
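In one dimension, __call__ reduces to textbook linear interpolation on a periodic grid: locate the cell containing each point, split the position into an integer node index and a fractional part, then blend the two surrounding node values. A self-contained sketch with hypothetical grid values and a single query point:

    import numpy as np

    dx, origin = 0.1, 0.
    tab = np.sin(np.linspace(0., 2. * np.pi, 11))   # values on an 11-point grid
    x = np.array([0.234])                           # query position
    i_y = (x - origin) / dx
    floor = np.floor(i_y)
    i_y -= floor                                    # fractional part in [0, 1)
    i = floor.astype(int) % (tab.size - 1)          # periodic left node
    res = tab[i] * (1. - i_y)                       # left-node contribution
    res += tab[(i + 1) % (tab.size - 1)] * i_y      # right-node contribution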
diff --git a/HySoP/hysop/numerics/remeshing.py b/HySoP/hysop/numerics/remeshing.py
index e78faa293811b8632a3d779ea280b5337c0ebb13..4c07e382a2eb355ea8c43b6d2b7f0e488398ced9 100644
--- a/HySoP/hysop/numerics/remeshing.py
+++ b/HySoP/hysop/numerics/remeshing.py
@@ -1,14 +1,15 @@
 """
 @file remeshing.py
 """
-from parmepy.constants import np, PARMES_INDEX
+from parmepy.constants import np, PARMES_INDEX, PARMES_REAL
 from parmepy.numerics.method import NumMethod
+import parmepy.tools.numpywrappers as npw
 
 
 class Remeshing(NumMethod):
     """Remshing"""
 
-    def __init__(self, dim, topo, d, work, iwork):
+    def __init__(self, kernel, dim, topo, d, work, iwork):
         """
         Create a remeshing numeric method based on given formula.
         @param dim : problem dimension
@@ -38,14 +39,15 @@ class Remeshing(NumMethod):
           - 'm8prime' : M8prime formula
         """
         NumMethod.__init__(self)
+        self._kernel = kernel()
         self.dir = d
         self._dim = dim
         self.work = work
         self.iwork = iwork
         assert len(self.work) == 2
         assert len(self.iwork) == self._dim
-        self._shift = 0
-        self._weights = None
+        self.shift = self._kernel.shift
+        self.weights = self._kernel.weights
         self.topo = topo
         self._slice_all = [slice(None, None, None)
                            for dd in xrange(dim)]
@@ -121,7 +123,7 @@ class Remeshing(NumMethod):
         """
         d = self.dir
         mesh = self.topo.mesh
-        resolution = self.topo.globalMeshResolution
+        resolution = self.topo.mesh.discretization.resolution
         origin = self.topo.domain.origin
         dx = mesh.space_step
         tmp, i_y, index = self._affect_working_arrays(mesh.resolution)
@@ -133,13 +135,14 @@ class Remeshing(NumMethod):
         i_y[...] -= floor
 
         # Gobal indices
-        index[d][...] = (floor.astype(PARMES_INDEX) - self._shift) \
+        index[d][...] = (floor.astype(PARMES_INDEX) - self.shift) \
             % (resolution[d] - 1)
         result[...] = 0.  # reset res array (no more uses to floor variable)
-        for w_id, w in enumerate(self._weights):
+        for w_id, w in enumerate(self.weights):
             if w_id > 0:
                 index[d][...] = (index[d] + 1) % (resolution[d] - 1)
-            tmp[...] = w(i_y, pscal)
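+            # evaluate the w_id-th kernel weight polynomial at i_y
+            # (written in place into tmp), then scale by the particle value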
+            tmp[...] = self._kernel(w_id, i_y, tmp)
+            tmp *= pscal
             for i in xrange(mesh.resolution[d]):
                 sl = self.slice_i_along_d(i, d)
                 index_sl = tuple([ind[sl] for ind in index])
@@ -148,202 +151,418 @@ class Remeshing(NumMethod):
         return result
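The loop above spreads, weight by weight, the particle quantity onto the surrounding grid nodes, shifting the node index by one between weights. A self-contained 1-D sketch of the same scheme with the Linear kernel (hypothetical names; the real code wraps indices with resolution - 1 because the first periodic point is duplicated, while this sketch simply wraps with the array size):

    import numpy as np

    def remesh_1d(pos, pscal, grid, dx):
        y = pos / dx
        left = int(np.floor(y))
        frac = y - left                             # fractional position in the cell
        i = left % grid.size
        grid[i] += (1. - frac) * pscal              # Linear weight [-1, 1] -> 1 - y
        grid[(i + 1) % grid.size] += frac * pscal   # Linear weight [1, 0] -> y
        return grid

    grid = np.zeros(8)
    remesh_1d(0.23, 1.0, grid, dx=0.1)
    assert abs(grid.sum() - 1.0) < 1e-12            # total quantity is conserved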
 
 
-class L2_1(Remeshing):
-    """Remshing with L2_1 kernel."""
-    def __init__(self, dim, topo, d, work, iwork):
-        Remeshing.__init__(self, dim, topo, d, work, iwork)
-        self._shift = 1
-        self._weights = [
-            lambda y, s: s * ((y * (y * (-y + 2.) - 1.)) / 2.),
-            lambda y, s: s * ((y * y * (3. * y - 5.) + 2.) / 2.),
-            lambda y, s: s * ((y * (y * (-3. * y + 4.) + 1.)) / 2.),
-            lambda y, s: s * ((y * y * (y - 1.)) / 2.)
+class RemeshFormula(object):
+    """Abstract class for remeshing formulas"""
+    def __init__(self):
+        self.shift = 0
+        self.weights = None
+
+    def __call__(self, w, x, res):
+        """Compute remeshing weights."""
+        res[...] = self.weights[w][0]
+        for c in self.weights[w][1:]:
+            res[...] *= x
+            res[...] += c
+        return res
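This __call__ is Horner's rule driven by a coefficient array (highest degree first), which replaces the hand-nested lambdas of the old per-kernel classes. A quick equivalence check against the first L2_1 weight, written with plain numpy:

    import numpy as np

    coeffs = np.array([-1., 2., -1., 0.]) / 2.   # first L2_1 weight
    y = np.array([0.3])
    res = np.empty_like(y)
    res[...] = coeffs[0]                         # Horner, as in __call__
    for c in coeffs[1:]:
        res[...] *= y
        res[...] += c
    old = (y * (y * (-y + 2.) - 1.)) / 2.        # old nested-lambda form
    assert np.allclose(res, old)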
+
+
+class Linear(RemeshFormula):
+    """Linear kernel."""
+    def __init__(self):
+        super(Linear, self).__init__()
+        self.shift = 0
+        self.weights = [
+            npw.asrealarray([-1, 1]),
+            npw.asrealarray([1, 0]),
             ]
 
 
-class L2_2(Remeshing):
-    """Remshing with L2_2 kernel."""
-    def __init__(self, dim, topo, d, work, iwork):
-        Remeshing.__init__(self, dim, topo, d, work, iwork)
-        self._shift = 1
-        self._weights = [
-            lambda y, s: s * ((y * (y * (y * (y * (2. * y - 5.) + 3.) + 1.) - 1.)) / 2.),
-            lambda y, s: s * ((y * y * (y * (y * (-6. * y + 15.) - 9.) - 2.) + 2.) / 2.),
-            lambda y, s: s * ((y * (y * (y * (y * (6. * y - 15.) + 9.) + 1.) + 1.)) / 2.),
-            lambda y, s: s * ((y * y * y * (y * (-2. * y + 5.) - 3.)) / 2.)
+class L2_1(RemeshFormula):
+    """L2_1 kernel."""
+    def __init__(self):
+        super(L2_1, self).__init__()
+        self.shift = 1
+        self.weights = [
+            npw.asrealarray([-1, 2, -1, 0]) / 2.,
+            npw.asrealarray([3, -5, 0, 2]) / 2.,
+            npw.asrealarray([-3, 4, 1, 0]) / 2.,
+            npw.asrealarray([1, -1, 0, 0]) / 2.,
             ]
 
 
-class L2_3(Remeshing):
-    """Remshing with L2_3 kernel."""
-    def __init__(self, dim, topo, d, work, iwork):
-        Remeshing.__init__(self, dim, topo, d, work, iwork)
-        self._shift = 1
-        self._weights = [
-            lambda y, s: s * ((y * (y * (y * y * (y * (y * (-6. * y + 21.) - 25.) + 10.) + 1.) - 1.)) / 2.),
-            lambda y, s: s * ((y * y * (y * y * (y * (y * (18. * y - 63.) + 75.) - 30.) - 2.) + 2.) / 2.),
-            lambda y, s: s * ((y * (y * (y * y * (y * (y * (-18. * y + 63.) - 75.) + 30.) + 1.) + 1.)) / 2.),
-            lambda y, s: s * ((y * y * y * y * (y * (y * (6. * y - 21.) + 25.) - 10.)) / 2.)
+class L2_2(RemeshFormula):
+    """L2_2 kernel."""
+    def __init__(self):
+        super(L2_2, self).__init__()
+        self.shift = 1
+        self.weights = [
+            npw.asrealarray([2, -5, 3, 1, -1, 0]) / 2.,
+            npw.asrealarray([-6, 15, -9, -2, 0, 2]) / 2.,
+            npw.asrealarray([6, -15, 9, 1, 1, 0]) / 2.,
+            npw.asrealarray([-2, 5, -3, 0, 0, 0]) / 2.,
             ]
 
 
-class L2_4(Remeshing):
-    """Remshing with L2_4 kernel."""
-    def __init__(self, dim, topo, d, work, iwork):
-        Remeshing.__init__(self, dim, topo, d, work, iwork)
-        self._shift = 1
-        self._weights = [
-            lambda y, s: s * ((y * (y * (y * y * y * (y * (y * (y * (20. * y - 90.) + 154.) - 119.) + 35.) + 1.) - 1.)) / 2.),
-            lambda y, s: s * ((y * y * (y * y * y * (y * (y * (y * (-60. * y + 270.) - 462.) + 357.) - 105.) - 2.) + 2.) / 2.),
-            lambda y, s: s * ((y * (y * (y * y * y * (y * (y * (y * (60. * y - 270.) + 462.) - 357.) + 105.) + 1.) + 1.)) / 2.),
-            lambda y, s: s * ((y * y * y * y * y * (y * (y * (y * (-20. * y + 90.) - 154.) + 119.) - 35.)) / 2.)
+class L2_3(RemeshFormula):
+    """L2_3 kernel."""
+    def __init__(self):
+        super(L2_3, self).__init__()
+        self.shift = 1
+        self.weights = [
+            npw.asrealarray([-6, 21, -25, 10, 0, 1, -1, 0]) / 2.,
+            npw.asrealarray([18, -63, 75, -30, 0, -2, 0, 2]) / 2.,
+            npw.asrealarray([-18, 63, -75, 30, 0, 1, 1, 0]) / 2.,
+            npw.asrealarray([6, -21, 25, -10, 0, 0, 0, 0]) / 2.,
             ]
 
 
-class L4_2(Remeshing):
-    """Remshing with L4_2 kernel."""
-    def __init__(self, dim, topo, d, work, iwork):
-        Remeshing.__init__(self, dim, topo, d, work, iwork)
-        self._shift = 2
-        self._weights = [
-            lambda y, s: s * y * (y * (y * (y * (13. - 5. * y) - 9.) - 1.) + 2.) / 24.,
-            lambda y, s: s * y * (y * (y * (y * (25. * y - 64.) + 39.) + 16.) - 16.) / 24.,
-            lambda y, s: s * (y * y * (y * (y * (126. - 50. * y) - 70.) - 30.) / 24. + 1.),
-            lambda y, s: s * y * (y * (y * (y * (50. * y - 124.) + 66.) + 16.) + 16.) / 24.,
-            lambda y, s: s * y * (y * (y * (y * (61. - 25. * y) - 33.) - 1.) - 2.) / 24.,
-            lambda y, s: s * y * y * y * (y * (5. * y - 12.) + 7.) / 24.
+class L2_4(RemeshFormula):
+    """L2_4 kernel."""
+    def __init__(self):
+        super(L2_4, self).__init__()
+        self.shift = 1
+        self.weights = [
+            npw.asrealarray([20, -90, 154, -119, 35, 0, 0, 1, -1, 0]) / 2.,
+            npw.asrealarray([-60, 270, -462, 357, -105, 0, 0, -2, 0, 2]) / 2.,
+            npw.asrealarray([60, -270, 462, -357, 105, 0, 0, 1, 1, 0]) / 2.,
+            npw.asrealarray([-20, 90, -154, 119, -35, 0, 0, 0, 0, 0]) / 2.,
             ]
 
 
-class L4_3(Remeshing):
-    """Remshing with L4_3 kernel."""
-    def __init__(self, dim, topo, d, work, iwork):
-        Remeshing.__init__(self, dim, topo, d, work, iwork)
-        self._shift = 2
-        self._weights = [
-            lambda y, s: s * (2. + (-1. + (-2. + (-22. + (58. + (-49. + 14. * y) * y) * y) * y) * y) * y) * y / 24.,
-            lambda y, s: s * (-16. + (16. + (4. + (111. + (-290. + (245. - 70. * y) * y) * y) * y) * y) * y) * y / 24.,
-            lambda y, s: s * (1. + (-30. + (-224. + (580. + (-490. + 140. * y) * y) * y) * y * y) * y * y / 24.),
-            lambda y, s: s * (16. + (16. + (-4. + (226. + (-580. + (490. - 140. * y) * y) * y) * y) * y) * y) * y / 24.,
-            lambda y, s: s * (-2. + (-1. + (2. + (-114. + (290. + (-245. + 70. * y) * y) * y) * y) * y) * y) * y / 24.,
-            lambda y, s: s * (23. + (-58. + (49. - 14. * y) * y) * y) * y * y * y * y / 24.
+class L4_2(RemeshFormula):
+    """L4_2 kernel."""
+    def __init__(self):
+        super(L4_2, self).__init__()
+        self.shift = 2
+        self.weights = [
+            npw.asrealarray([-5, 13, -9, -1, 2, 0]) / 24.,
+            npw.asrealarray([25, -64, 39, 16, -16, 0]) / 24.,
+            npw.asrealarray([-50, 126, -70, -30, 0, 24]) / 24.,
+            npw.asrealarray([50, -124, 66, 16, 16, 0]) / 24.,
+            npw.asrealarray([-25, 61, -33, -1, -2, 0]) / 24.,
+            npw.asrealarray([5, -12, 7, 0, 0, 0]) / 24.,
             ]
 
 
-class L4_4(Remeshing):
-    """Remshing with L4_4 kernel."""
-    def __init__(self, dim, topo, d, work, iwork):
-        Remeshing.__init__(self, dim, topo, d, work, iwork)
-        self._shift = 2
-        self._weights = [
-            lambda y, s: s * (2. + (-1. + (-2. + (1. + (-80. + (273. + (-354. + (207. - 46. * y) * y) * y) * y) * y) * y) * y) * y) * y / 24.,
-            lambda y, s: s * (-16. + (16. + (4. + (-4. + (400. + (-1365. + (1770. + (-1035. + 230. * y) * y) * y) * y) * y) * y) * y) * y) * y / 24.,
-            lambda y, s: s * (1. + (-30. + (6. + (-800. + (2730. + (-3540. + (2070. - 460. * y) * y) * y) * y) * y) * y * y) * y * y / 24.),
-            lambda y, s: s * (16. + (16. + (-4. + (-4. + (800. + (-2730. + (3540. + (-2070. + 460. * y) * y) * y) * y) * y) * y) * y) * y) * y / 24.,
-            lambda y, s: s * (-2. + (-1. + (2. + (1. + (-400. + (1365. + (-1770. + (1035. - 230. * y) * y) * y) * y) * y) * y) * y) * y) * y / 24.,
-            lambda y, s: s * (80. + (-273. + (354. + (-207. + 46. * y) * y) * y) * y) * y * y * y * y * y / 24.
+class L4_3(RemeshFormula):
+    """L4_3 kernel."""
+    def __init__(self):
+        super(L4_3, self).__init__()
+        self.shift = 2
+        self.weights = [
+            npw.asrealarray([14, -49, 58, -22, -2, -1, 2, 0]) / 24.,
+            npw.asrealarray([-70, 245, -290, 111, 4, 16, -16, 0]) / 24.,
+            npw.asrealarray([140, -490, 580, -224, 0, -30, 0, 24]) / 24.,
+            npw.asrealarray([-140, 490, -580, 226, -4, 16, 16, 0]) / 24.,
+            npw.asrealarray([70, -245, 290, -114, 2, -1, -2, 0]) / 24.,
+            npw.asrealarray([-14, 49, -58, 23, 0, 0, 0, 0]) / 24.,
             ]
 
 
-class M8Prime(Remeshing):
-    """Remshing with M8Prime kernel."""
-    def __init__(self, dim, topo, d, work, iwork):
-        Remeshing.__init__(self, dim, topo, d, work, iwork)
-        self._shift = 3
-        self._weights = [
-            lambda y, s: s * ((y * (y * (y * (y * (y * (y * (-10. * y + 21.) + 28.) - 105.)+ 70.) + 35.) - 56.) + 17.) / 3360.),
-            lambda y, s: s * ((y * (y * (y * (y * (y * (y * (70. * y - 175.) - 140.) + 770.)- 560.) - 350.) + 504.) - 102.) / 3360.),
-            lambda y, s: s * ((y * (y * (y * (y * (y * (y * (-210. * y + 609.) + 224.) - 2135.)+ 910.) + 2765.) - 2520.) + 255.) / 3360.),
-            lambda y, s: s * ((y * y * (y * y * (y * y * (70. * y - 231.) + 588.) - 980.) + 604.)/ 672.),
-            lambda y, s: s * ((y * (y * (y * (y * (y * (y * (-70. * y + 259.) - 84.) - 427.)- 182.) + 553.) + 504.) + 51.) / 672.),
-            lambda y, s: s * ((y * (y * (y * (y * (y * (y * (210. * y - 861.) + 532.) + 770.)+ 560.) - 350.) - 504.) - 102.) / 3360.),
-            lambda y, s: s * ((y * (y * (y * (y * (y * (y * (-70. * y + 315.) - 280.) - 105.)- 70.) + 35.) + 56.) + 17.) / 3360.),
-            lambda y, s: s * ((y * y * y * y * y * (y * (10. * y - 49.) + 56.)) / 3360.)
+class L4_4(RemeshFormula):
+    """L4_4 kernel."""
+    def __init__(self):
+        super(L4_4, self).__init__()
+        self.shift = 2
+        self.weights = [
+            npw.asrealarray([-46, 207, -354, 273, -80, 1, -2, -1, 2, 0]) / 24.,
+            npw.asrealarray([230, -1035, 1770, -1365, 400, -4, 4, 16, -16, 0]) / 24.,
+            npw.asrealarray([-460, 2070, -3540, 2730, -800, 6, 0, -30, 0, 24]) / 24.,
+            npw.asrealarray([460, -2070, 3540, -2730, 800, -4, -4, 16, 16, 0]) / 24.,
+            npw.asrealarray([-230, 1035, -1770, 1365, -400, 1, 2, -1, -2, 0]) / 24.,
+            npw.asrealarray([46, -207, 354, -273, 80, 0, 0, 0, 0, 0]) / 24.,
             ]
 
 
-class L6_3(Remeshing):
-    """Remshing with L6_3 kernel."""
-    def __init__(self, dim, topo, d, work, iwork):
-        Remeshing.__init__(self, dim, topo, d, work, iwork)
-        self._shift = 3
-        self._weights = [
-            lambda y, s: s * (((-12. + (4. + (15. + (140. + (-370.+ (312. - 89. * y) * y) * y) * y) * y) * y) * y) / 720.),
-            lambda y, s: s * (((108. + (-54. + (-120. + (-955. +(2581. + (-2183. + 623. * y) * y) * y) * y) * y) * y ) * y) / 720.),
-            lambda y, s: s * (((-180. + (180. + (65. + (950. +(-2574. + (2182. - 623. * y) * y) * y) * y) * y) * y) * y) / 240.),
-            lambda y, s: s * ( 1. + ((-196. + (-959. + (2569. +(-2181. + 623. * y) * y) * y) * y * y ) * y * y) / 144.),
-            lambda y, s: s * (((108. + (108. + (-39. + (976. + (-2566.+ (2180. - 623. * y) * y) * y) * y) * y) * y) * y) / 144.),
-            lambda y, s: s * (((-36. + (-18. + (40. + (-995. + (2565.+ (-2179. + 623. * y) * y) * y) * y) * y) * y) * y) / 240.),
-            lambda y, s: s * (((12. + (4. + (-15. + (1010. + (-2566.+ (2178. - 623. * y) * y) * y) * y) * y) * y) * y) / 720.),
-            lambda y, s: s * (((-145. + (367. + (-311. + 89. * y)* y) * y) * y * y * y * y) / 720.)
+class M8Prime(RemeshFormula):
+    """M8Prime kernel."""
+    def __init__(self):
+        super(M8Prime, self).__init__()
+        self.shift = 3
+        self.weights = [
+            npw.asrealarray([-10, 21, 28, -105, 70, 35, -56, 17]) / 3360.,
+            npw.asrealarray([70, -175, -140, 770, -560, -350, 504, -102]) / 3360.,
+            npw.asrealarray([-210, 609, 224, -2135, 910, 2765, -2520, 255]) / 3360.,
+            npw.asrealarray([350, -1155, 0, 2940, 0, -4900, 0, 3020]) / 3360.,
+            npw.asrealarray([-350, 1295, -420, -2135, -910, 2765, 2520, 255]) / 3360.,
+            npw.asrealarray([210, -861, 532, 770, 560, -350, -504, -102]) / 3360.,
+            npw.asrealarray([-70, 315, -280, -105, -70, 35, 56, 17]) / 3360.,
+            npw.asrealarray([10, -49, 56, 0, 0, 0, 0, 0]) / 3360.,
             ]
 
 
-class L6_4(Remeshing):
-    """Remshing with L6_4 kernel."""
-    def __init__(self, dim, topo, d, work, iwork):
-        Remeshing.__init__(self, dim, topo, d, work, iwork)
-        self._shift = 3
-        self._weights = [
-            lambda y, s: s * (-12. + (4. + (15. + (-5. + (500. + (-1718. + (2231. + (-1305. + 290. * y) * y) * y) * y) * y) * y) * y) * y) * y / 720.,
-            lambda y, s: s * (108. + (-54. + (-120. + (60. + (-3509. + (12027. + (-15617. + (9135. - 2030. * y) * y) * y) * y) * y) * y) * y) * y) * y / 720.,
-            lambda y, s: s * (-540. + (540. + (195. + (-195. + (10548. + (-36084. + (46851. + (-27405. + 6090. * y) * y) * y) * y) * y) * y) * y) * y) * y / 720.,
-            lambda y, s: s * (1. + (-980. + (280. + (-17605. + (60145. + (-78085. + (45675. - 10150. * y) * y) * y) * y) * y) * y * y) * y * y / 720.),
-            lambda y, s: s * (540. + (540. + (-195. + (-195. + (17620. + (-60150. + (78085. + (-45675. + 10150. * y) * y) * y) * y) * y) * y) * y) * y) * y / 720.,
-            lambda y, s: s * (-108. + (-54. + (120. + (60. + (-10575. + (36093. + (-46851. + (27405. - 6090. * y) * y) * y) * y) * y) * y) * y) * y) * y / 720.,
-            lambda y, s: s * (12. + (4. + (-15. + (-5. + (3524. + (-12032. + (15617. + (-9135. + 2030. * y) * y) * y) * y) * y) * y) * y) * y) * y / 720.,
-            lambda y, s: s * (-503. + (1719. + (-2231. + (1305. - 290. * y) * y) * y) * y) * y * y * y * y * y / 720.
+class L6_3(RemeshFormula):
+    """L6_3 kernel."""
+    def __init__(self):
+        super(L6_3, self).__init__()
+        self.shift = 3
+        self.weights = [
+            npw.asrealarray([-89, 312, -370, 140, 15, 4, -12, 0]) / 720.,
+            npw.asrealarray([623, -2183, 2581, -955, -120, -54, 108, 0]) / 720.,
+            npw.asrealarray([-1869, 6546, -7722, 2850, 195, 540, -540, 0]) / 720.,
+            npw.asrealarray([3115, -10905, 12845, -4795, 0, -980, 0, 720]) / 720.,
+            npw.asrealarray([-3115, 10900, -12830, 4880, -195, 540, 540, 0]) / 720.,
+            npw.asrealarray([1869, -6537, 7695, -2985, 120, -54, -108, 0]) / 720.,
+            npw.asrealarray([-623, 2178, -2566, 1010, -15, 4, 12, 0]) / 720.,
+            npw.asrealarray([89, -311, 367, -145, 0, 0, 0, 0]) / 720.,
             ]
 
 
-class L6_5(Remeshing):
-    """Remshing with L6_5 kernel."""
-    def __init__(self, dim, topo, d, work, iwork):
-        Remeshing.__init__(self, dim, topo, d, work, iwork)
-        self._shift = 3
-        self._weights = [
-            lambda y, s: s * (-12. + (4. + (15. + (-5. + (-3. + (1803. + (-7829. + (13785. + (-12285. + (5533. - 1006. * y) * y) * y) * y) * y) * y) * y) * y) * y) * y) * y / 720.,
-            lambda y, s: s * (108. + (-54. + (-120. + (60. + (12. + (-12620. + (54803. + (-96495. + (85995. + (-38731. + 7042. * y) * y) * y) * y) * y) * y) * y) * y) * y) * y) * y / 720.,
-            lambda y, s: s * (-540. + (540. + (195. + (-195. + (-15. + (37857. + (-164409. + (289485. + (-257985. + (116193. - 21126. * y) * y) * y) * y) * y) * y) * y) * y) * y) * y) * y / 720.,
-            lambda y, s: s * (1. + (-980. + (280. + (-63090. + (274015. + (-482475. + (429975. + (-193655. + 35210. * y) * y) * y) * y) * y) * y * y) * y * y) * y * y / 720.),
-            lambda y, s: s * (540. + (540. + (-195. + (-195. + (15. + (63085. + (-274015. + (482475. + (-429975. + (193655. - 35210. * y) * y) * y) * y) * y) * y) * y) * y) * y) * y) * y / 720.,
-            lambda y, s: s * (-108. + (-54. + (120. + (60. + (-12. + (-37848. + (164409. + (-289485. + (257985. + (-116193. + 21126. * y) * y) * y) * y) * y) * y) * y) * y) * y) * y) * y / 720.,
-            lambda y, s: s * (12. + (4. + (-15. + (-5. + (3. + (12615. + (-54803. + (96495. + (-85995. + (38731. - 7042. * y) * y) * y) * y) * y) * y) * y) * y) * y) * y) * y / 720.,
-            lambda y, s: s * (-1802. + (7829. + (-13785. + (12285. + (-5533. + 1006. * y) * y) * y) * y) * y) * y * y * y * y * y * y / 720.
+class L6_4(RemeshFormula):
+    """L6_4 kernel."""
+    def __init__(self):
+        super(L6_4, self).__init__()
+        self.shift = 3
+        self.weights = [
+            npw.asrealarray([290, -1305, 2231, -1718, 500, -5, 15, 4, -12, 0]) / 720.,
+            npw.asrealarray([-2030, 9135, -15617, 12027, -3509, 60, -120, -54, 108, 0]) / 720.,
+            npw.asrealarray([6090, -27405, 46851, -36084, 10548, -195, 195, 540, -540, 0]) / 720.,
+            npw.asrealarray([-10150, 45675, -78085, 60145, -17605, 280, 0, -980, 0, 720]) / 720.,
+            npw.asrealarray([10150, -45675, 78085, -60150, 17620, -195, -195, 540, 540, 0]) / 720.,
+            npw.asrealarray([-6090, 27405, -46851, 36093, -10575, 60, 120, -54, -108, 0]) / 720.,
+            npw.asrealarray([2030, -9135, 15617, -12032, 3524, -5, -15, 4, 12, 0]) / 720.,
+            npw.asrealarray([-290, 1305, -2231, 1719, -503, 0, 0, 0, 0, 0]) / 720.,
             ]
 
 
-class L6_6(Remeshing):
-    """Remshing with L6_6 kernel."""
-    def __init__(self, dim, topo, d, work, iwork):
-        Remeshing.__init__(self, dim, topo, d, work, iwork)
-        self._shift = 3
-        self._weights = [
-            lambda y, s: s * (-12. + (4. + (15. + (-5. + (-3. + (1. + (6587. + (-34869. + (77815. + (-93577. + (63866. + (-23426. + 3604. * y) * y) * y) * y) * y) * y) * y) * y) * y) * y) * y) * y) * y / 720.,
-            lambda y, s: s * (108. + (-54. + (-120. + (60. + (12. + (-6. + (-46109. + (244083. + (-544705. + (655039. + (-447062. + (163982. - 25228. * y) * y) * y) * y) * y) * y) * y) * y) * y) * y) * y) * y) * y / 720.,
-            lambda y, s: s * (-540. + (540. + (195. + (-195. + (-15. + (15. + (138327. + (-732249. + (1634115. + (-1965117. + (1341186. + (-491946. + 75684. * y) * y) * y) * y) * y) * y) * y) * y) * y) * y) * y) * y) * y / 720.,
-            lambda y, s: s * (1. + (-980. + (280. + (-20. + (-230545. + (1220415. + (-2723525. + (3275195. + (-2235310. + (819910. - 126140. * y) * y) * y) * y) * y) * y) * y) * y * y) * y * y) * y * y / 720.),
-            lambda y, s: s * (540. + (540. + (-195. + (-195. + (15. + (15. + (230545. + (-1220415. + (2723525. + (-3275195. + (2235310. + (-819910. + 126140. * y) * y) * y) * y) * y) * y) * y) * y) * y) * y) * y) * y) * y / 720.,
-            lambda y, s: s * (-108. + (-54. + (120. + (60. + (-12. + (-6. + (-138327. + (732249. + (-1634115. + (1965117. + (-1341186. + (491946. - 75684. * y) * y) * y) * y) * y) * y) * y) * y) * y) * y) * y) * y) * y / 720.,
-            lambda y, s: s * (12. + (4. + (-15. + (-5. + (3. + (1. + (46109. + (-244083. + (544705. + (-655039. + (447062. + (-163982. + 25228. * y) * y) * y) * y) * y) * y) * y) * y) * y) * y) * y) * y) * y / 720.,
-            lambda y, s: s * (-6587. + (34869. + (-77815. + (93577. + (-63866. + (23426. - 3604. * y) * y) * y) * y) * y) * y) * y * y * y * y * y * y * y / 720.
+class L6_5(RemeshFormula):
+    """L6_5 kernel."""
+    def __init__(self):
+        super(L6_5, self).__init__()
+        self.shift = 3
+        self.weights = [
+            npw.asrealarray([-1006, 5533, -12285, 13785, -7829, 1803, -3, -5, 15, 4, -12, 0]) / 720.,
+            npw.asrealarray([7042, -38731, 85995, -96495, 54803, -12620, 12, 60, -120, -54, 108, 0]) / 720.,
+            npw.asrealarray([-21126, 116193, -257985, 289485, -164409, 37857, -15, -195, 195, 540, -540, 0]) / 720.,
+            npw.asrealarray([35210, -193655, 429975, -482475, 274015, -63090, 0, 280, 0, -980, 0, 720]) / 720.,
+            npw.asrealarray([-35210, 193655, -429975, 482475, -274015, 63085, 15, -195, -195, 540, 540, 0]) / 720.,
+            npw.asrealarray([21126, -116193, 257985, -289485, 164409, -37848, -12, 60, 120, -54, -108, 0]) / 720.,
+            npw.asrealarray([-7042, 38731, -85995, 96495, -54803, 12615, 3, -5, -15, 4, 12, 0]) / 720.,
+            npw.asrealarray([1006, -5533, 12285, -13785, 7829, -1802, 0, 0, 0, 0, 0, 0]) / 720.,
+            ]
+
+
+class L6_6(RemeshFormula):
+    """L6_6 kernel."""
+    def __init__(self):
+        super(L6_6, self).__init__()
+        self.shift = 3
+        self.weights = [
+            npw.asrealarray([3604, -23426, 63866, -93577, 77815, -34869, 6587, 1, -3, -5, 15, 4, -12, 0]) / 720.,
+            npw.asrealarray([-25228, 163982, -447062, 655039, -544705, 244083, -46109, -6, 12, 60, -120, -54, 108, 0]) / 720.,
+            npw.asrealarray([75684, -491946, 1341186, -1965117, 1634115, -732249, 138327, 15, -15, -195, 195, 540, -540, 0]) / 720.,
+            npw.asrealarray([-126140, 819910, -2235310, 3275195, -2723525, 1220415, -230545, -20, 0, 280, 0, -980, 0, 720]) / 720.,
+            npw.asrealarray([126140, -819910, 2235310, -3275195, 2723525, -1220415, 230545, 15, 15, -195, -195, 540, 540, 0]) / 720.,
+            npw.asrealarray([-75684, 491946, -1341186, 1965117, -1634115, 732249, -138327, -6, -12, 60, 120, -54, -108, 0]) / 720.,
+            npw.asrealarray([25228, -163982, 447062, -655039, 544705, -244083, 46109, 1, 3, -5, -15, 4, 12, 0]) / 720.,
+            npw.asrealarray([-3604, 23426, -63866, 93577, -77815, 34869, -6587, 0, 0, 0, 0, 0, 0, 0]) / 720.,
             ]
 
 
-class L8_4(Remeshing):
-    """Remshing with L8_4 kernel."""
-    def __init__(self, dim, topo, d, work, iwork):
-        Remeshing.__init__(self, dim, topo, d, work, iwork)
-        self._shift = 4
-        self._weights = [
-            lambda y, s: s * ((y * (y * (y * (y * (y * (y * (y * (y * (-3569. * y + 16061.) - 27454.) + 21126.) - 6125.) + 49.) - 196.) - 36.) + 144.)) / 40320.),
-            lambda y, s: s * ((y * (y * (y * (y * (y * (y * (y * (y * (32121. * y - 144548.) + 247074.) - 190092.) + 55125.) - 672.) + 2016.) + 512.) - 1536.)) / 40320.),
-            lambda y, s: s * ((y * (y * (y * (y * (y * (y * (y * (y * (-128484. * y + 578188.) - 988256.) + 760312.) - 221060.) + 4732.) - 9464.) - 4032.) + 8064.)) / 40320.),
-            lambda y, s: s * ((y * (y * (y * (y * (y * (y * (y * (y * (299796. * y - 1349096.) + 2305856.) - 1774136.) + 517580.) - 13664.) + 13664.) + 32256.) - 32256.)) / 40320.),
-            lambda y, s: s * ((y * y * (y * y * (y * (y * (y * (y * (-449694. * y + 2023630.) - 3458700.) + 2661540.) - 778806.) + 19110.) - 57400.) + 40320.) / 40320.),
-            lambda y, s: s * ((y * (y * (y * (y * (y * (y * (y * (y * (449694. * y - 2023616.) + 3458644.) - 2662016.) + 780430.) - 13664.) - 13664.) + 32256.) + 32256.)) / 40320.),
-            lambda y, s: s * ((y * (y * (y * (y * (y * (y * (y * (y * (-299796. * y + 1349068.) - 2305744.) + 1775032.) - 520660.) + 4732.) + 9464.) - 4032.) - 8064.)) / 40320.),
-            lambda y, s: s * ((y * (y * (y * (y * (y * (y * (y * (y * (128484. * y - 578168.) + 988176.) - 760872.) + 223020.) - 672.) - 2016.) + 512.) + 1536.)) / 40320.),
-            lambda y, s: s * ((y * (y * (y * (y * (y * (y * (y * (y * (-32121. * y + 144541.) - 247046.) + 190246.) - 55685.) + 49.) + 196.) - 36.) - 144.)) / 40320.),
-            lambda y, s: s * ((y * y * y * y * y * (y * (y * (y * (3569. * y - 16060.) + 27450.) - 21140.) + 6181.)) / 40320.)
+class L8_4(RemeshFormula):
+    """L8_4 kernel."""
+    def __init__(self):
+        super(L8_4, self).__init__()
+        self.shift = 4
+        self.weights = [
+            npw.asrealarray([-3569, 16061, -27454, 21126, -6125, 49, -196, -36, 144, 0]) / 40320.,
+            npw.asrealarray([32121, -144548, 247074, -190092, 55125, -672, 2016, 512, -1536, 0]) / 40320.,
+            npw.asrealarray([-128484, 578188, -988256, 760312, -221060, 4732, -9464, -4032, 8064, 0]) / 40320.,
+            npw.asrealarray([299796, -1349096, 2305856, -1774136, 517580, -13664, 13664, 32256, -32256, 0]) / 40320.,
+            npw.asrealarray([-449694, 2023630, -3458700, 2661540, -778806, 19110, 0, -57400, 0, 40320]) / 40320.,
+            npw.asrealarray([449694, -2023616, 3458644, -2662016, 780430, -13664, -13664, 32256, 32256, 0]) / 40320.,
+            npw.asrealarray([-299796, 1349068, -2305744, 1775032, -520660, 4732, 9464, -4032, -8064, 0]) / 40320.,
+            npw.asrealarray([128484, -578168, 988176, -760872, 223020, -672, -2016, 512, 1536, 0]) / 40320.,
+            npw.asrealarray([-32121, 144541, -247046, 190246, -55685, 49, 196, -36, -144, 0]) / 40320.,
+            npw.asrealarray([3569, -16060, 27450, -21140, 6181, 0, 0, 0, 0, 0]) / 40320.,
             ]
+
+
+def polynomial_optimisation():
+    """Testing different python implementation of a polynomial expression.
+    Use polynomial of degree 10 :
+    10*x^10+9*x^9+8*x^8+7*x^7+6*x^6+5*x^5+4*x^4+3*x^3+2*x^2+x
+    """
+    def test_form(func, r, a, s, *args):
+        tt = 0.
+        for i in xrange(10):
+            r[...] = 0.
+            t = MPI.Wtime()
+            res = func(a, *args)
+            if res is not None:  # in-place functions write into r and return None
+                r[...] = res
+            tt += (MPI.Wtime() - t)
+        print tt, s
+
+    from parmepy.constants import PARMES_REAL, ORDER
+    from parmepy.mpi.main_var import MPI
+    nb = 128
+    a = npw.asrealarray(np.random.random((nb, nb, nb)))
+    r = np.zeros_like(a)
+    temp = np.zeros_like(a)
+    lambda_p = lambda x: 1. + 2. * x + 3. * x ** 2 + 4. * x ** 3 + \
+        5. * x ** 4 + 6. * x ** 5 + 7. * x ** 6 + 8. * x ** 7 + \
+        9. * x ** 8 + 10. * x ** 9 + 11. * x ** 10
+    lambda_h = lambda x: (x * (x * (x * (x * (x * (x * (x * (x * (11. * x + 10.) + 9.) + \
+                8.) + 7.) + 6.) + 5.) + 4.) + 3.) + 2.) + 1.
+    coeffs = npw.asrealarray(np.arange(11, 0, -1))
+
+    def func_h(x, r):
+        r[...] = coeffs[0]
+        for c in coeffs[1:]:
+            r[...] *= x
+            r[...] += c
+
+    def func_p(x, r, tmp):
+        r[...] = 1.
+        tmp[...] = x
+        tmp[...] *= 2.
+        r[...] += tmp
+        tmp[...] = x ** 2
+        tmp[...] *= 3.
+        r[...] += tmp
+        tmp[...] = x ** 3
+        tmp[...] *= 4.
+        r[...] += tmp
+        tmp[...] = x ** 4
+        tmp[...] *= 5.
+        r[...] += tmp
+        tmp[...] = x ** 5
+        tmp[...] *= 6.
+        r[...] += tmp
+        tmp[...] = x ** 6
+        tmp[...] *= 7.
+        r[...] += tmp
+        tmp[...] = x ** 7
+        tmp[...] *= 8.
+        r[...] += tmp
+        tmp[...] = x ** 8
+        tmp[...] *= 9.
+        r[...] += tmp
+        tmp[...] = x ** 9
+        tmp[...] *= 10.
+        r[...] += tmp
+        tmp[...] = x ** 10
+        tmp[...] *= 11.
+        r[...] += tmp
+
+    def func_p_bis(x, r, tmp):
+        r[...] = 1.
+        tmp[...] = x
+        tmp[...] *= 2.
+        r[...] += tmp
+        tmp[...] = x
+        tmp[...] *= x
+        tmp[...] *= 3.
+        r[...] += tmp
+        tmp[...] = x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= 4.
+        r[...] += tmp
+        tmp[...] = x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= 5.
+        r[...] += tmp
+        tmp[...] = x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= 6.
+        r[...] += tmp
+        tmp[...] = x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= 7.
+        r[...] += tmp
+        tmp[...] = x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= 8.
+        r[...] += tmp
+        tmp[...] = x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= 9.
+        r[...] += tmp
+        tmp[...] = x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= 10.
+        r[...] += tmp
+        tmp[...] = x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= x
+        tmp[...] *= 11.
+        r[...] += tmp
+
+    from numpy.polynomial.polynomial import polyval
+
+    def np_polyval(x, r):
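+        # numpy's polyval expects coefficients ordered from degree 0
+        # upward, hence the [::-1] reversal of the high-to-low coeffs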
+        r[...] = polyval(x, coeffs[::-1])
+
+    assert lambda_h(1.) == 66.
+    assert lambda_p(1.) == 66.
+    single_val = np.ones((1, ), order=ORDER)
+    single_val_r = np.zeros_like(single_val)
+    single_val_tmp = np.zeros_like(single_val)
+    func_p(single_val, single_val_r, single_val_tmp)
+    assert single_val_r[0] == 66.
+    single_val_r[0] = 0.
+    func_p_bis(single_val, single_val_r, single_val_tmp)
+    assert single_val_r[0] == 66.
+    single_val_r[0] = 0.
+    func_h(single_val, single_val_r)
+    assert single_val_r[0] == 66.
+    single_val_r[0] = 0.
+    np_polyval(single_val, single_val_r)
+    assert single_val_r[0] == 66.
+
+    test_form(lambda_p, r, a, "Lambda base canonique")
+    test_form(lambda_h, r, a, "Lambda Horner")
+    test_form(func_p, r, a, "Function base canonique", r, temp)
+    test_form(func_p_bis, r, a, "Function base canonique (bis)", r, temp)
+    test_form(func_h, r, a, "Function Horner", r)
+    test_form(np_polyval, r, a, "Numpy polyval", r)
+
+    res_test = np.empty_like(a)
+    res_test_coeff = np.empty_like(a)
+    w_test = lambda y: (-12. + (4. + (15. + (-5. + (-3. + (1. + (6587. + (-34869. + \
+        (77815. + (-93577. + (63866. + (-23426. + 3604. * y) * y) * y) * y) * y) * y) \
+        * y) * y) * y) * y) * y) * y) * y / 720.
+    res_test[...] = w_test(a)
+    w_test_coeffs = npw.asrealarray([3604, -23426, 63866, -93577, 77815, -34869, 6587,
+                                     1, -3, -5, 15, 4, -12, 0]) / 720.
+    res_test_coeff[...] = w_test_coeffs[0]
+    for c in w_test_coeffs[1:]:
+        res_test_coeff[...] *= a
+        res_test_coeff[...] += c
+
+    print np.max(res_test - res_test_coeff)
+    assert np.allclose(res_test, res_test_coeff)
diff --git a/HySoP/hysop/numerics/tests/test_diffOp.py b/HySoP/hysop/numerics/tests/test_diffOp.py
index c1b5b768c4e50d8745df5166dc539b1e5469b67b..ca315c24081d4962596a1240877ef42371efb64c 100755
--- a/HySoP/hysop/numerics/tests/test_diffOp.py
+++ b/HySoP/hysop/numerics/tests/test_diffOp.py
@@ -2,7 +2,6 @@
 import parmepy as pp
 import numpy as np
 from parmepy.fields.continuous import Field
-from parmepy.mpi.topology import Cartesian
 import parmepy.numerics.differential_operations as diffop
 import parmepy.tools.numpywrappers as npw
 import math as m
@@ -27,6 +26,17 @@ def computeVort(res, x, y, z, t):
     return res
 
 
+def computeVel2(res, x, y, t):
+    res[0][...] = sin(x) * cos(y)
+    res[1][...] = - cos(x) * sin(y)
+    return res
+
+
+def computeVort2(res, x, y, t):
+    res[0][...] = 2. * sin(x) * sin(y)
+    return res
+
+
 def analyticalDivWV(res, x, y, z, t):
     res[0][...] = - sin(y) * cos(y) * sin(z) * cos(z) * \
         (- sin(x) * sin(x) + cos(x) * cos(x)) + \
@@ -52,36 +62,57 @@ def analyticalDivStressTensor(res, x, y, z, t):
 
 
 nb = 65
-box = pp.Box(3, length=[2.0 * pi, 2.0 * pi, 2.0 * pi])
-nbElem = [nb] * 3
-topo = Cartesian(box, box.dimension, nbElem,
-                 ghosts=[2, 2, 2])
-velo = Field(domain=box, formula=computeVel,
-             name='Velocity', isVector=True)
-vorti = Field(domain=box, formula=computeVort,
-              name='Vorticity', isVector=True)
-velo.discretize(topo)
-vorti.discretize(topo)
-velo.initialize(topo=topo)
-vorti.initialize(topo=topo)
-wd = vorti.discreteFields[topo]
-vd = velo.discreteFields[topo]
-ind = topo.mesh.iCompute
+from parmepy.tools.parameters import Discretization
+d3 = Discretization([nb] * 3, [2] * 3)
+d2 = Discretization([nb] * 2, [2] * 2)
+
+
+def init(discr, vform, wform):
+    dim = len(discr.resolution)
+    box = pp.Box(length=[2.0 * pi] * dim)
+    topo = box.create_topology(discretization=discr)
+    velo = Field(domain=box, formula=vform,
+                 name='Velocity', isVector=True)
+    vorti = Field(domain=box, formula=wform,
+                  name='Vorticity', isVector=dim == 3)
+    velo.discretize(topo)
+    vorti.discretize(topo)
+    velo.initialize(topo=topo)
+    vorti.initialize(topo=topo)
+    wd = vorti.discreteFields[topo]
+    vd = velo.discreteFields[topo]
+    return vd, wd
 
 
 def testCurl():
+    vd, wd = init(d3, computeVel, computeVort)
     memshape = vd.data[0].shape
     result = [npw.zeros(memshape) for i in xrange(3)]
     lwork = diffop.Curl.getWorkLengths()
     work = [npw.zeros(memshape) for i in xrange(lwork)]
-    curlOp = diffop.Curl(topo, work)
+    curlOp = diffop.Curl(vd.topology, work)
     result = curlOp(vd.data, result)
-
+    ind = vd.topology.mesh.iCompute
     for i in xrange(3):
         assert np.allclose(wd.data[i][ind], result[i][ind])
 
 
+def testCurl2D():
+    vd, wd = init(d2, computeVel2, computeVort2)
+    memshape = vd.data[0].shape
+    result = [npw.zeros(memshape)]
+    lwork = diffop.Curl.getWorkLengths()
+    work = [npw.zeros(memshape) for _ in xrange(lwork)]
+    curlOp = diffop.Curl(vd.topology, work)
+    result = curlOp(vd.data, result)
+    ind = vd.topology.mesh.iCompute
+    assert np.allclose(wd.data[0][ind], result[0][ind])
+
+
 def testDivWV():
+    vd, wd = init(d3, computeVel, computeVort)
+    box = vd.domain
+    topo = vd.topology
     # Reference field
     ref = Field(domain=box, formula=analyticalDivWV,
                 name='Analytical', isVector=True)
@@ -99,11 +130,15 @@ def testDivWV():
     # Numerical VS analytical
     Lx = box.length[0]
     errX = (Lx / (nb - 1)) ** 4
+    ind = topo.mesh.iCompute
     for i in xrange(3):
         assert np.allclose(refd[i][ind], result[i][ind], rtol=errX)
 
 
 def testGradVxW():
+    vd, wd = init(d3, computeVel, computeVort)
+    box = vd.domain
+    topo = vd.topology
     # Reference field
     ref = Field(domain=box, formula=analyticalGradVxW,
                 name='Analytical', isVector=True)
@@ -117,9 +152,15 @@ def testGradVxW():
     result = [npw.zeros(memshape) for i in xrange(3)]
     diag = npw.zeros(2)
     result, diag = gradOp(vd.data, wd.data, result, diag)
-
     # Numerical VS analytical
-    Lx = box.length[0]
-    errX = (Lx / (nb - 1)) ** 4
+    errX = (box.length[0] / (nb - 1)) ** 4
+    ind = topo.mesh.iCompute
     for i in xrange(3):
         assert np.allclose(refd[i][ind], result[i][ind], rtol=errX)
+
+
+if __name__ == "__main__":
+    testCurl()
+    testCurl2D()
+    testDivWV()
+    testGradVxW()
diff --git a/HySoP/hysop/numerics/tests/test_integrators.py b/HySoP/hysop/numerics/tests/test_integrators.py
index 9eab719fbf5c75c0027f9e122866ebdd253f88d2..daec18e7c80c6b3471351164d852d8bb528162ac 100644
--- a/HySoP/hysop/numerics/tests/test_integrators.py
+++ b/HySoP/hysop/numerics/tests/test_integrators.py
@@ -4,6 +4,7 @@ from parmepy.constants import PARMES_REAL
 import parmepy.tools.numpywrappers as npw
 import math
 import numpy as np
+from parmepy.tools.parameters import Discretization
 pi = math.pi
 sin = np.sin
 cos = np.cos
@@ -20,10 +21,9 @@ tend = 0.2
 #time = npu.seq(tinit, tend, dt)
 #nbSteps = time.size
 import parmepy as pp
-from parmepy.mpi.topology import Cartesian
-ghosts = [0]
-box = pp.Box(1, length=[2.0 * pi], origin=[0.])
-topo = Cartesian(box, 1, [nb + 1], ghosts=ghosts)
+d1 = Discretization([nb + 1])
+box = pp.Box(length=[2.0 * pi], origin=[0.])
+topo = box.create_topology(dim=1, discretization=d1)
 
 
 # A set of tests and reference functions
diff --git a/HySoP/hysop/numerics/update_ghosts.py b/HySoP/hysop/numerics/update_ghosts.py
index 407c228fcff0b7d2789f102c830354daa6e1737a..f18dce22185d005d5eaf61b5eed3494dd7a6056d 100644
--- a/HySoP/hysop/numerics/update_ghosts.py
+++ b/HySoP/hysop/numerics/update_ghosts.py
@@ -1,5 +1,5 @@
 """
-@file numerics/updateGhosts.py
+@file numerics/update_ghosts.py
 
 Update ghost points for a list of numpy arrays
 for a given topology.
@@ -29,7 +29,7 @@ class UpdateGhosts(object):
         ## The mpi topology and mesh distribution
         self.topology = topo
         ## Ghost layer
-        self.ghosts = self.topology.ghosts
+        self.ghosts = self.topology.mesh.discretization.ghosts
         # Indices of points to be filled from previous neighbour
         # Each component is a slice and corresponds to a direction in
         # the topology grid.
@@ -46,9 +46,36 @@ class UpdateGhosts(object):
         self._recvbuffer = []
         # domain dimension
         self._dim = self.topology.domain.dimension
+
+        self._setup_slices()
+
+        ## shape of numpy arrays to be updated.
+        self.memshape = tuple(self.topology.mesh.resolution)
+        # length of memory required to save one numpy array
+        self._memoryblocksize = np.prod(self.memshape)
+        ## Number of numpy arrays that will be updated
+        self.nbElements = nbElements
+        if self.topology.size > 1:  # else no need to set buffers ...
+            # Size computation below assumes that what we send in one
+            # dir has the same size as what we receive from process in the
+            # same dir ...
+            exchange_dir = [d for d in xrange(self._dim)
+                            if self.topology.cutdir[d]]
+            # A temporary array used to calculate slices sizes
+            temp = np.zeros(self.memshape, dtype=np.int8)
+
+            for d in exchange_dir:
+                buffsize = 0
+                buffsize += temp[self._g_tonext[d]].size * self.nbElements
+                self._sendbuffer.append(npw.zeros((buffsize)))
+                self._recvbuffer.append(npw.zeros((buffsize)))
+
+    def _setup_slices(self):
+        """
+        Compute the slices used to send and receive ghost values.
+        """
         defslice = [slice(None, None, None)] * self._dim
         nogh_slice = [slice(0)] * self._dim
-
         for d in xrange(self._dim):
             if self.ghosts[d] > 0:
                 self._g_fromprevious.append(list(defslice))
@@ -78,26 +105,6 @@ class UpdateGhosts(object):
                 self._g_toprevious.append(list(nogh_slice))
                 self._g_tonext.append(list(nogh_slice))
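Concretely, for a direction d with g = ghosts[d], these four slices pick out the following bands of the local array (1-D sketch, g = 2, 8 points including ghosts):

    import numpy as np

    g = 2
    v = np.arange(8)
    # layout: [ghosts from previous | interior | ghosts from next]
    g_fromprevious = slice(g)        # v[0:2], filled by the previous rank
    g_fromnext = slice(-g, None)     # v[6:8], filled by the next rank
    g_toprevious = slice(g, 2 * g)   # v[2:4], first interior band, sent back
    g_tonext = slice(-2 * g, -g)     # v[4:6], last interior band, sent forward
    assert v[g_toprevious].size == v[g_fromnext].size == g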
 
-        ## shape of numpy arrays to be updated.
-        self.memshape = tuple(self.topology.mesh.resolution)
-        # length of memory required to save one numpy array
-        self._memoryblocksize = np.prod(self.memshape)
-        ## Number of numpy arrays that will be updated
-        self.nbElements = nbElements
-        if self.topology.size > 1:  # else no need to set buffers ...
-            # Size computation below assumes that what we send in one
-            # dir has the same size as what we receive from process in the
-            # same dir ...
-            exchange_dir = [d for d in xrange(self._dim)
-                            if self.topology.cutdir[d]]
-            # A temporary array used to calculate slices sizes
-            temp = np.zeros(self.memshape, dtype=np.int8)
-
-            for d in exchange_dir:
-                buffsize = 0
-                buffsize += temp[self._g_tonext[d]].size * self.nbElements
-                self._sendbuffer.append(npw.zeros((buffsize)))
-                self._recvbuffer.append(npw.zeros((buffsize)))
 
     def __call__(self, variables):
         return self.apply(variables)
@@ -112,18 +119,159 @@ class UpdateGhosts(object):
         assert (self.topology.domain.boundaries == PERIODIC).all(),\
             'Only implemented for periodic boundary conditions.'
         assert isinstance(variables, list)
-
         dirs = [d for d in xrange(self._dim)
                 if self.topology.shape[d] == 1]
+        for d in dirs:
+            self._applyBC_in_dir(variables, d)
+
+    def _applyBC_in_dir(self, variables, d):
+        """Apply periodic boundary condition in direction d."""
+        for v in variables:
+            assert v.shape == self.memshape
+            v[self._g_fromprevious[d]] = v[self._g_tonext[d]]
+            v[self._g_fromnext[d]] = v[self._g_toprevious[d]]
+
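For a non-distributed periodic direction the exchange is a pure local copy; in 1-D with g ghost points the two assignments above amount to:

    import numpy as np

    g = 2
    v = np.arange(10.)        # [ghosts | interior | ghosts]
    v[:g] = v[-2 * g:-g]      # fromprevious <- tonext (periodic wrap)
    v[-g:] = v[g:2 * g]       # fromnext <- toprevious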
+    @debug
+    def apply(self, variables):
+        """
+        Compute ghosts values from mpi communications and boundary conditions.
+        """
+        assert isinstance(variables, list)
+        exchange_dir = []
+        if self.topology.size > 1:
+            exchange_dir = [d for d in xrange(self._dim)
+                            if self.topology.cutdir[d]]
+        i = 0
+        for d in exchange_dir:
+            self._apply_in_dir(variables, d, i)
+            # update index in neighbours list
+            i += 1
+            # End of loop through send/recv directions.
+        # Apply boundary conditions for non-distributed directions
+        self.applyBC(variables)
 
+    def _apply_in_dir(self, variables, d, i):
+        """Communicate ghosts values in direction d for neighbour 
+        in direction i of the topology"""
+        comm = self.topology.comm
+        rank = self.topology.rank
+        neighbours = self.topology.neighbours
+        # 1 - Fill in buffers
+        # Loop through all variables that are distributed
+        pos = 0
+        nextpos = 0
         for v in variables:
             assert v.shape == self.memshape
-            for d in dirs:
-                v[self._g_fromprevious[d]] = v[self._g_tonext[d]]
-                v[self._g_fromnext[d]] = v[self._g_toprevious[d]]
+            nextpos += v[self._g_tonext[d]].size
+            self._sendbuffer[i][pos:nextpos] = v[self._g_tonext[d]].flat
+            pos = nextpos
+
+        # 2 - Send to next receive from previous
+        dest_rk = neighbours[1, i]
+        from_rk = neighbours[0, i]
+        comm.Sendrecv([self._sendbuffer[i], PARMES_MPI_REAL],
+                      dest=dest_rk, sendtag=rank,
+                      recvbuf=self._recvbuffer[i],
+                      source=from_rk, recvtag=from_rk)
+
+        # 3 - Copy recvbuffer back into variables and update sendbuffer
+        # for next send
+        pos = 0
+        nextpos = 0
+        for v in variables:
+            nextpos += v[self._g_fromprevious[d]].size
+            v[self._g_fromprevious[d]].flat = \
+                self._recvbuffer[i][pos:nextpos]
+            self._sendbuffer[i][pos:nextpos] = \
+                v[self._g_toprevious[d]].flat
+            pos = nextpos
+
+        # 4 - Send to previous and receive from next
+        dest_rk = neighbours[0, i]
+        from_rk = neighbours[1, i]
+        comm.Sendrecv([self._sendbuffer[i], PARMES_MPI_REAL],
+                      dest=dest_rk, sendtag=rank,
+                      recvbuf=self._recvbuffer[i],
+                      source=from_rk, recvtag=from_rk)
+        # 5 - Copy recvbuffer back into variables.
+        pos = 0
+        nextpos = 0
+        for v in variables:
+            nextpos += v[self._g_fromprevious[d]].size
+            v[self._g_fromnext[d]].flat = \
+                self._recvbuffer[i][pos:nextpos]
+            pos = nextpos
+
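The pattern above packs every variable into one flat buffer per exchange direction, performs a single Sendrecv, then unpacks in the same order. A stripped-down mpi4py sketch of that pack/exchange/unpack cycle on a ring (hypothetical two-variable list, run under mpirun):

    import numpy as np
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    rank, size = comm.Get_rank(), comm.Get_size()
    variables = [np.ones(4) * rank, np.ones(4) * 10 * rank]
    sendbuffer = np.empty(sum(v.size for v in variables))
    recvbuffer = np.empty_like(sendbuffer)
    pos = 0
    for v in variables:                       # 1 - fill in the send buffer
        sendbuffer[pos:pos + v.size] = v.flat
        pos += v.size
    dest, src = (rank + 1) % size, (rank - 1) % size
    comm.Sendrecv(sendbuffer, dest=dest, sendtag=rank,   # 2 - exchange
                  recvbuf=recvbuffer, source=src, recvtag=src)
    pos = 0
    for v in variables:                       # 3 - copy received values back
        v.flat = recvbuffer[pos:pos + v.size]
        pos += v.size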
+
+class UpdateGhostsFull(UpdateGhosts):
+    """
+    Ghost points synchronization for a list of numpy arrays
+    """
+
+    @debug
+    def __init__(self, topo, nbElements):
+        """
+        Setup for send/recv process of ghosts points for a list
+        of numpy arrays, for a given topology.
+
+        @param topo : the topology common to all fields.
+        @param nbElements : max number of arrays that will be updated
+        at each call.
+        nbElements and memshape will be used to allocate memory for local
+        buffers used for the send-recv process.
+        This version differs from UpdateGhosts in that it also fills ghost
+        values in the edges and corners of the domain. Directions are
+        processed in reversed order.
+        """
+        super(UpdateGhostsFull, self).__init__(topo, nbElements)
+
+    def _setup_slices(self):
+        """
+        Computes slices to send and recieve ghosts values.
+        It assumes that directions are computed from an xrange() loop so that
+        ghosts in previous directions are completed.
+        """
+        defslice = [slice(None, None, None)] * self._dim
+        nogh_slice = [slice(0)] * self._dim
+        for d in xrange(self._dim):
+            if self.ghosts[d] > 0:
+                self._g_fromprevious.append(list(defslice))
+                self._g_fromprevious[d][d] = slice(self.ghosts[d])
+                self._g_fromnext.append(list(defslice))
+                self._g_fromnext[d][d] = slice(-self.ghosts[d], None, None)
+                self._g_toprevious.append(list(defslice))
+                self._g_toprevious[d][d] = slice(self.ghosts[d],
+                                                 2 * self.ghosts[d], None)
+                self._g_tonext.append(list(defslice))
+                self._g_tonext[d][d] = slice(-2 * self.ghosts[d],
+                                             -self.ghosts[d])
+                
+                ## Slices for the other directions, corresponding to
+                ## directions not yet exchanged: x < d. For directions
+                ## x > d, the slice is a full slice that includes ghosts.
+                ## This assumes that directions x > d have already been
+                ## updated (by communications or local exchanges).
+                otherDim = [x for x in xrange(self._dim) if x < d]
+                for d2 in otherDim:
+                    self._g_fromprevious[d][d2] = slice(self.ghosts[d2],
+                                                        -self.ghosts[d2])
+                    self._g_fromnext[d][d2] = slice(self.ghosts[d2],
+                                                    -self.ghosts[d2])
+                    self._g_toprevious[d][d2] = slice(self.ghosts[d2],
+                                                      -self.ghosts[d2])
+                    self._g_tonext[d][d2] = slice(self.ghosts[d2],
+                                                  -self.ghosts[d2])
+            else:
+                self._g_fromprevious.append(list(nogh_slice))
+                self._g_fromnext.append(list(nogh_slice))
+                self._g_toprevious.append(list(nogh_slice))
+                self._g_tonext.append(list(nogh_slice))
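Processing directions in decreasing order, with full slices kept along the already-exchanged directions x > d, is what propagates values into edges and corners. A 2-D toy run on one periodic process (g = 1) shows the effect:

    import numpy as np

    g, n = 1, 4
    v = np.zeros((n + 2 * g, n + 2 * g))
    v[g:-g, g:-g] = 1.                  # interior set, ghost frame still zero
    # direction 1 first: copy bands over the interior of direction 0 only
    v[g:-g, :g] = v[g:-g, -2 * g:-g]
    v[g:-g, -g:] = v[g:-g, g:2 * g]
    # direction 0 last: full slices along direction 1, ghosts included
    v[:g, :] = v[-2 * g:-g, :]
    v[-g:, :] = v[g:2 * g, :]
    assert (v == 1.).all()              # edges and corners are now filled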
 
     @debug
     def apply(self, variables):
+        """
+        Apply either mpi communications or local boundary conditions to fill ghosts.
+        Loop over directions and switch among local BC or mpi comm.
+        """
         assert isinstance(variables, list)
         comm = self.topology.comm
         rank = self.topology.rank
@@ -132,58 +280,18 @@ class UpdateGhosts(object):
         if self.topology.size > 1:
             exchange_dir = [d for d in xrange(self._dim)
                             if self.topology.cutdir[d]]
+        localBC_dir = [d for d in xrange(self._dim)
+                       if self.topology.shape[d] == 1]
+        assert len(exchange_dir) + len(localBC_dir) == self._dim
 
         i = 0
-        for d in exchange_dir:
-            # 1 - Fill in buffers
-            # Loop through all variables that are distributed
-            pos = 0
-            nextpos = 0
-            for v in variables:
-                assert v.shape == self.memshape
-                nextpos += v[self._g_tonext[d]].size
-                self._sendbuffer[i][pos:nextpos] = v[self._g_tonext[d]].flat
-                pos = nextpos
-
-            # 2 - Send to next receive from previous
-            dest_rk = neighbours[1, i]
-            from_rk = neighbours[0, i]
-            comm.Sendrecv([self._sendbuffer[i], PARMES_MPI_REAL],
-                          dest=dest_rk, sendtag=rank,
-                          recvbuf=self._recvbuffer[i],
-                          source=from_rk, recvtag=from_rk)
-
-            # 3 - Print recvbuffer back to variables and update sendbuffer
-            # for next send
-            pos = 0
-            nextpos = 0
-            for v in variables:
-                nextpos += v[self._g_fromprevious[d]].size
-                v[self._g_fromprevious[d]].flat = \
-                    self._recvbuffer[i][pos:nextpos]
-                self._sendbuffer[i][pos:nextpos] = \
-                    v[self._g_toprevious[d]].flat
-                pos = nextpos
-
-            # 4 -Send to previous and receive from next
-            dest_rk = neighbours[0, i]
-            from_rk = neighbours[1, i]
-            comm.Sendrecv([self._sendbuffer[i], PARMES_MPI_REAL],
-                          dest=dest_rk, sendtag=rank,
-                          recvbuf=self._recvbuffer[i],
-                          source=from_rk, recvtag=from_rk)
-            # 5 - Print recvbuffer back to variables.
-            pos = 0
-            nextpos = 0
-            for v in variables:
-                nextpos += v[self._g_fromprevious[d]].size
-                v[self._g_fromnext[d]].flat = \
-                    self._recvbuffer[i][pos:nextpos]
-                pos = nextpos
-
-           # update index in neighbours list
-            i += 1
-            # End of loop through send/recv directions.
+        for d in xrange(self._dim-1, -1, -1):
+            if d in localBC_dir:
+                self._applyBC_in_dir(variables, d)
+            elif d in exchange_dir:
+                self._apply_in_dir(variables, d, i)
+                # update index in neighbours list
+                i += 1
+                # End of loop through send/recv directions.
+
 
-        # Apply boundary conditions for non-distributed directions
-        self.applyBC(variables)
diff --git a/HySoP/hysop/numerics/update_ghosts.pyc b/HySoP/hysop/numerics/update_ghosts.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a182dd829945da5039c49bcaa4709ee68f304c65
Binary files /dev/null and b/HySoP/hysop/numerics/update_ghosts.pyc differ
diff --git a/HySoP/hysop/operator/SAVE_REDIS/redistribute.py b/HySoP/hysop/operator/SAVE_REDIS/redistribute.py
new file mode 100644
index 0000000000000000000000000000000000000000..608425c6b72a83d5634080cadcbf971e0c17782d
--- /dev/null
+++ b/HySoP/hysop/operator/SAVE_REDIS/redistribute.py
@@ -0,0 +1,372 @@
+"""
+@file redistribute.py
+Setup for data transfer/redistribution between two parmes topologies.
+
+This operator is an inter operator which is supposed to define the process to
+transfer variables and data from one operator to another.
+This mainly concerns data redistribution if the two operators work on
+different mpi topologies.
+
+When is it required to define an operator between op1 and op2?
+If:
+- the intersection between op1.output-variables and op2.input-variables
+  is not empty
+- AND the topology on which the variables of op1 are defined is different
+  from the one of op2's variables.
+A sketch of this criterion is given right after this docstring.
+
+Note Franck: this kind of operator may also be useful
+to define the interpolation/filter process for data transfer for
+a variable defined on several meshes.
+
+"""
+from parmepy import __VERBOSE__
+from parmepy.constants import debug, PARMES_MPI_REAL, ORDERMPI, np, S_DIR
+from parmepy.operator.continuous import Operator
+from parmepy.mpi.bridge import Bridge
+from parmepy.methods_keys import Support
+
+
+class Redistribute(Operator):
+    """
+    Interconnection between two operators.
+    SetUp will compute (or get if it already exists) a Bridge between two
+    topologies.
+    Apply redistributes data from opFrom topology to opTo topology.
+
+    """
+    @debug
+    def __init__(self, opFrom, opTo, name_suffix=None, component=None, **kwds):
+
+        """
+        Create an operator to distribute data between two mpi topologies for a
+        list of variables belonging to two operators.
+
+        @param variables : the set of variables to be redistributed
+        @param opFrom : source operator
+        @param opTo : target operator, i.e. the operator that handles the
+        topology onto which data must be redistributed.
+        @param component: components of vector fields to consider (default:
+        None, all components are taken).
+        """
+        super(Redistribute, self).__init__(**kwds)
+        vars_str = "_("
+        for vv in self.variables:
+            vars_str += vv.name + ","
+        vars_str = vars_str[:-1] + ')'
+        if component is not None:
+            vars_str += S_DIR[component]
+        if name_suffix is None:
+            name_suffix = ''
+        self.name += vars_str + name_suffix
+        ## Source Operator
+        self.opFrom = opFrom
+        ## Targeted operator
+        self.opTo = opTo
+
+        self.input = self.output = self.variables
+        self.evts = []
+        self._toHost_fields = []
+        self._toDevice_fields = []
+        self._hasRequests = False
+        self.component = component
+        if component is None:
+            # All components are considered
+            self._range_components = lambda v: range(v.nbComponents)
+        else:
+            # Only the given component is considered
+            self._range_components = lambda v: [component]
+        self.r_request = {}
+        self.s_request = {}
+        self._r_types = {}
+        self._s_types = {}
+        for v in self.variables:
+            self._r_types[v] = {}
+            self._s_types[v] = {}
+
+        # Enable desynchronization: the opTo operator must call the wait
+        # function of this redistribute operator, so opTo has to know self.
+        self.opTo.addRedistributeRequirement(self)
+
+    @debug
+    def setup(self):
+        """
+        Computes intersection of two topologies.
+
+        """
+        # Check that variables belong to both operators
+        # and that variables have enough components.
+        for v in self.variables:
+            assert v in self.opFrom.variables and v in self.opTo.variables, \
+                'Redistribute error : one of the variables is not present\
+                in both source and target operators.'
+            if self.component is not None:
+                assert self.component >= 0, 'component must be positive'
+            assert v.nbComponents > self.component, \
+                'Redistribute error : variable ' + str(v.name) + ' does not \
+                have enough components (' + str(self.component) + ')'
+        assert self.opFrom.isUp() and self.opTo.isUp(), \
+            """You should setup both opFrom and opTo operators
+            before any attempt to setup a redistribute operator."""
+
+        # Look for an operator operating on device.
+        try:
+            opFrom_is_device = \
+                self.opFrom.method[Support].find('gpu') >= 0
+        except KeyError:  # op.method is a dict not containing Support in keys
+            opFrom_is_device = False
+        except IndexError:  # op.method is a string
+            opFrom_is_device = False
+        except TypeError:  # op.method is None
+            opFrom_is_device = False
+        try:
+            opTo_is_device = \
+                self.opTo.method[Support].find('gpu') >= 0
+        except KeyError:  # op.method is a dict not containing Support in keys
+            opTo_is_device = False
+        except IndexError:  # op.method is a string
+            opTo_is_device = False
+        except TypeError:  # op.method is None
+            opTo_is_device = False
+
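# Editor's note: the two try/except blocks above implement the same test;
# an equivalent helper would be (sketch, not part of the patch):
#     def _on_device(op):
#         try:
#             return op.method[Support].find('gpu') >= 0
#         except (KeyError, IndexError, TypeError):
#             return False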
+        if not opFrom_is_device and not opTo_is_device:
+            # case: opFrom(host) --host--> opTo(host)
+            self.apply = self._host
+            self.wait = self._wait_host
+        else:
+            # Have on device operators
+            self.wait = self._wait_all
+            if opFrom_is_device and not opTo_is_device:
+                # case: opFrom(GPU) --toHost--host--> opTo(host)
+                self.apply = self._apply_toHost_host
+            elif not opFrom_is_device and opTo_is_device:
+                # case: opFrom(host) --host--toDevice--> opTo(GPU)
+                self.apply = self._apply_host_toDevice
+            else:
+                # case: opFrom(GPU) --toHost--host--toDevice--> opTo(GPU)
+                # Transfers are removed if variables are batched
+                if np.any([self.opFrom.discreteFields[v].isBatch
+                           for v in self.variables] +
+                          [self.opTo.discreteFields[v].isBatch
+                           for v in self.variables]):
+                    self.apply = self._host
+                else:
+                    self.apply = self._apply_toHost_host_toDevice
+
+        # Build bridges and toTransfer lists
+        self.bridges = {}
+        backup = None
+        lastvar = None
+        # Create bridges between topologies, for each variable.
+        for v in self.variables:
+            # Bridges creation
+            topofrom = self.opFrom.discreteFields[v].topology
+            topoto = self.opTo.discreteFields[v].topology
+            # Reuse the previous bridge if it links the same topologies.
+            if backup is not None and [topofrom, topoto] == backup:
+                self.bridges[v] = self.bridges[lastvar]
+            else:
+                self.bridges[v] = Bridge(topofrom, topoto)
+            backup = [topofrom, topoto]
+            lastvar = v
+            # toTransfer list completion
+            if opFrom_is_device:
+                self._toHost_fields.append(self.opFrom.discreteFields[v])
+            if opTo_is_device:
+                self._toDevice_fields.append(self.opTo.discreteFields[v])
+
+        self._main_comm = self.opFrom.discreteFields[v].topology.parent()
+        self._main_rank = self._main_comm.Get_rank()
+
+        # Flag telling if there will be some mpi data transfers.
+        self._useless_transfer = {}
+        for v in self.variables:
+            self._useless_transfer[v] = \
+                (opFrom_is_device and opTo_is_device) and \
+                len(self.bridges[v].recvFrom.keys()) == 0 and \
+                len(self.bridges[v].sendTo.keys()) == 0
+
+        # Build MPI subarrays
+        dim = self.domain.dimension
+        for v in self.variables:
+            br = self.bridges[v]
+            vToShape = self.opTo.discreteFields[v].data[0].shape
+            vFromShape = self.opFrom.discreteFields[v].data[0].shape
+            for rk in br.recvFrom.keys():
+                subvshape = tuple([br.recvFrom[rk][i].stop -
+                                   br.recvFrom[rk][i].start
+                                   for i in range(dim)])
+                substart = tuple([br.recvFrom[rk][i].start
+                                  for i in range(dim)])
+                self._r_types[v][rk] = \
+                    PARMES_MPI_REAL.Create_subarray(vToShape,
+                                                    subvshape,
+                                                    substart,
+                                                    order=ORDERMPI)
+                self._r_types[v][rk].Commit()
+            for rk in br.sendTo.keys():
+                subvshape = tuple([br.sendTo[rk][i].stop -
+                                   br.sendTo[rk][i].start
+                                   for i in range(dim)])
+                substart = tuple([br.sendTo[rk][i].start
+                                  for i in range(dim)])
+                self._s_types[v][rk] = \
+                    PARMES_MPI_REAL.Create_subarray(vFromShape,
+                                                    subvshape,
+                                                    substart,
+                                                    order=ORDERMPI)
+                self._s_types[v][rk].Commit()
+
+        self._is_uptodate = True
+
+    def _apply_toHost_host_toDevice(self, simulation=None):
+        if __VERBOSE__:
+            print ("{0} APPLY toHOST+HOST+toDEVICE".format(self._main_rank))
+        self._toHost()
+        self._wait_device()
+        self._host()
+        self._wait_host()
+        self._toDevice()
+
+    def _apply_toHost_host(self, simulation=None):
+        if __VERBOSE__:
+            print ("{0} APPLY toHOST+HOST".format(self._main_rank))
+        self._toHost()
+        self._wait_device()
+        self._host()
+
+    def _apply_host_toDevice(self, simulation=None):
+        if __VERBOSE__:
+            print ("{0} APPLY HOST+toDEVICE".format(self._main_rank))
+        self._host()
+        self._wait_host()
+        self._toDevice()
+
+    def _toHost(self):
+        """
+        Proceed with data transfer of variables from device to host
+        """
+        for v in self.variables:
+            dv = self.opFrom.discreteFields[v]
+            if dv in self._toHost_fields:
+                if not self._useless_transfer[v]:
+                    dv.toHost(self.component)
+
+    def _toDevice(self):
+        """
+        Proceed with data transfer of variables from host to device
+        """
+        for v in self.variables:
+            dv = self.opTo.discreteFields[v]
+            if dv in self._toDevice_fields:
+                if not self._useless_transfer[v]:
+                    dv.toDevice(self.component)
+
+    def _host(self, simulation=None):
+        """
+        Proceed with data redistribution from opFrom to opTo
+        """
+        # TODO :
+        # - save a set of bridges in the domain and access them from operator
+        # - process all variables in one shot if they have the same topo
+        # (use buffers for mpi send/recv? )
+        # - move MPI datatypes into the bridge? --> and free MPI type properly
+        if __VERBOSE__:
+            print ("{0} APPLY HOST".format(self._main_rank))
+        self.r_request = {}
+        self.s_request = {}
+        for v in self.variables:
+            br = self.bridges[v]
+            # Apply for each component considered
+            for d in self._range_components(v):
+                if __VERBOSE__:
+                    print ("{0} APPLY HOST".format(self._main_rank),
+                           self.opFrom.discreteFields[v].name, '->',
+                           self.opTo.discreteFields[v].name, S_DIR[d])
+                vTo = self.opTo.discreteFields[v].data[d]
+                vFrom = self.opFrom.discreteFields[v].data[d]
+                v_name = self.opFrom.discreteFields[v].name + S_DIR[d]
+                if br.hasLocalInter:
+                    vTo[br.ito] = vFrom[br.ifrom]
+                cRk = self._main_comm.Get_rank()
+                for rk in br.recvFrom.keys():
+                    recvtag = (cRk + 1) * 989 + (rk + 1) * 99 + (d + 1) * 88
+                    self.r_request[v_name + str(rk)] = \
+                        self._main_comm.Irecv([vTo, 1, self._r_types[v][rk]],
+                                              source=rk, tag=recvtag)
+                    self._hasRequests = True
+                for rk in br.sendTo.keys():
+                    sendtag = (rk + 1) * 989 + (cRk + 1) * 99 + (d + 1) * 88
+                    self.s_request[v_name + str(rk)] = \
+                        self._main_comm.Issend([vFrom, 1,
+                                               self._s_types[v][rk]],
+                                               dest=rk, tag=sendtag)
+                    self._hasRequests = True
+
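# Editor's note: the tags above match across ranks; the receiver's recvtag
# and the sender's sendtag both evaluate to
#     (receiver_rank + 1) * 989 + (sender_rank + 1) * 99 + (d + 1) * 88.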
+    def _wait_host(self):
+        """
+        Wait for the completion of all pending
+        communication requests.
+        """
+        if __VERBOSE__:
+            print ("{0}", "WAIT MPI".format(self._main_rank),
+                   self._hasRequests)
+        if self._hasRequests:
+            for rk in self.r_request.keys():
+                self.r_request[rk].Wait()
+            for rk in self.s_request.keys():
+                self.s_request[rk].Wait()
+        self._hasRequests = False
+
+    def _wait_device(self):
+        if __VERBOSE__:
+            print ("{0}".format(self._main_rank), "WAITING OPENCL")
+        for dv in self._toDevice_fields + self._toHost_fields:
+            dv.wait()
+
+    def _wait_all(self):
+        self._wait_host()
+        self._wait_device()
+
+    def test(self, rsend=None, rrecv=None):
+        """
+        If neither rsend nor rrecv is given, return True if all
+        communication requests are complete; otherwise test for the
+        completion of the send to rsend or of the receive from rrecv.
+        Process ranks should be given in main_comm.
+        @param rsend : discrete variable name + S_DIR + rank of the
+        process to which a message has been sent and for which we want
+        to test message completion.
+        @param rrecv : discrete variable name + S_DIR + rank of the
+        process from which a message has been received and for which we
+        want to test message completion.
+        """
+        if(rsend is not None or rrecv is not None):
+            send_res = True
+            recv_res = True
+            if rsend is not None:
+                send_res = self.s_request[rsend].Test()
+            if rrecv is not None:
+                recv_res = self.r_request[rrecv].Test()
+            res = send_res and recv_res
+        else:
+            res = True
+            for rk in self.r_request.keys():
+                res = self.r_request[rk].Test()
+                if not res:
+                    return res
+            for rk in self.s_request.keys():
+                res = self.s_request[rk].Test()
+                if not res:
+                    return res
+        return res
+
+    def addRedistributeRequirement(self, red):
+        raise ValueError(
+            "Cannot add a requirement to a Redistribute operator.")
+
+    def getRedistributeRequirement(self):
+        return []
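
The subarray machinery above can be exercised in isolation. Below is a
minimal, self-contained mpi4py sketch (shapes and slice bounds are made up
for illustration, not taken from the patch):

    import numpy as np
    from mpi4py import MPI

    full_shape = (8, 8)     # local array shape
    sub_shape = (4, 8)      # block actually exchanged
    sub_start = (2, 0)      # offset of the block in the local array
    subtype = MPI.DOUBLE.Create_subarray(full_shape, sub_shape, sub_start,
                                         order=MPI.ORDER_C)
    subtype.Commit()
    data = np.zeros(full_shape)
    # The [buffer, count, datatype] triplet restricts the communication
    # to the subarray, as in the Irecv/Issend calls above, e.g.:
    #     req = MPI.COMM_WORLD.Issend([data, 1, subtype], dest=0, tag=1)
    #     req.Wait()
    subtype.Free()
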
diff --git a/HySoP/hysop/operator/SAVE_REDIS/redistribute_intercomm.py b/HySoP/hysop/operator/SAVE_REDIS/redistribute_intercomm.py
new file mode 100644
index 0000000000000000000000000000000000000000..749723490e1774a479d9c1a9fffccddbd55cb106
--- /dev/null
+++ b/HySoP/hysop/operator/SAVE_REDIS/redistribute_intercomm.py
@@ -0,0 +1,343 @@
+"""
+@file redistribute_intercomm.py
+Setup for data transfer/redistribution of a single parmes topology defined
+on different MPI communicators with null intersection (built, for example,
+with Comm_Split). One of the communicators is labeled as the source and
+the other as the destination.
+
+It relies on a Bridge_intercomm.
+"""
+from parmepy.constants import debug, PARMES_MPI_REAL, ORDERMPI, S_DIR, np
+from parmepy import __VERBOSE__
+from parmepy.operator.continuous import Operator
+from parmepy.mpi.topology import Bridge_intercomm
+from parmepy.methods_keys import Support
+
+
+class RedistributeIntercomm(Operator):
+    """
+    Interconnection between two topologies on different subsets of
+    MPI processes.
+    Setup will compute a Bridge_intercomm between the two subsets.
+    Transfers data from the task of id id_from to the task of id id_to.
+    """
+    @debug
+    def __init__(self, op_from, op_to, proc_tasks,
+                parent_comm, component=None, name_suffix='', **kwds):
+        """
+        Create an operator to distribute data of a list of variables
+        between two groups of MPI processes (tasks).
+
+        @param variables : the set of variables to be redistributed
+        @param op_from : source operator
+        @param op_to : target operator
+        @param proc_tasks: python array specifying the task id of each
+        process of the parent_comm MPI intracommunicator.
+        @param parent_comm : parent communicator (each process that uses
+        this operator must be a member of parent_comm)
+        @param component : component of the fields to consider (default:
+        None, all components are taken).
+        @remark : proc_tasks size and the number of processes in
+        parent_comm must be equal.
+        """
+        super(RedistributeIntercomm, self).__init__(**kwds)
+        vars_str = "_("
+        for vv in self.variables:
+            vars_str += vv.name + ","
+        vars_str = vars_str[:-1] + ')'
+        if component is not None:
+            vars_str += S_DIR[component]
+        self.name += vars_str + name_suffix
+        assert parent_comm.Get_size() == len(proc_tasks), \
+            "Parent communicator ({0})".format(parent_comm.Get_size()) + \
+            " and size of the task id array " + \
+            "({0}) are not equal".format(len(proc_tasks))
+        self.opFrom = op_from
+        self.opTo = op_to
+        self.id_from = self.opFrom.task_id
+        self.id_to = self.opTo.task_id
+        self.parent_comm = parent_comm
+        self._dim = self.variables[0].domain.dimension
+        self.proc_tasks = proc_tasks
+        self.input = self.output = self.variables
+        self.component = component
+        if component is None:
+            # All components are considered
+            self._range_components = lambda v: range(v.nbComponents)
+        else:
+            # Only the given component is considered
+            self._range_components = lambda v: [component]
+
+        self.bridges = {}
+        self.r_request = {}
+        self.s_request = {}
+        self._r_types = {}
+        self._s_types = {}
+        for v in self.variables:
+            self._r_types[v] = {}
+            self._s_types[v] = {}
+        self._toHost_fields = []
+        self._toDevice_fields = []
+        self._parent_rank = self.parent_comm.Get_rank()
+        self._my_rank = None
+
+    def discretize(self):
+
+        for v in self.variables:
+            if self.topology is None:
+                if self.proc_tasks[self._parent_rank] == self.id_from:
+                    self.topology = self.opFrom.discreteFields[v].topology
+                else:
+                    self.topology = self.opTo.discreteFields[v].topology
+
+        self._my_rank = self.topology.comm.Get_rank()
+        self._dim = self.topology.domain.dimension
+
+        for v in self.variables:
+            self.discreteFields[v] = v.discretize(self.topology)
+
+    @debug
+    def setup(self):
+        """
+        Computes intersection of topologies and set the MPI intercommunicator.
+        """
+        assert self.topology.is_uptodate, \
+            """You should setup topology
+            before any attempt to setup a redistribute operator."""
+
+        # Look for an operator operating on device.
+        try:
+            opFrom_is_device = \
+                self.opFrom.method[Support].find('gpu') >= 0
+        except KeyError:  # op.method is a dict not containing Support in keys
+            opFrom_is_device = False
+        except IndexError:  # op.method is a string
+            opFrom_is_device = False
+        except TypeError:  # op.method is None
+            opFrom_is_device = False
+        try:
+            opTo_is_device = \
+                self.opTo.method[Support].find('gpu') >= 0
+        except KeyError:  # op.method is a dict not containing Support in keys
+            opTo_is_device = False
+        except IndexError:  # op.method is a string
+            opTo_is_device = False
+        except TypeError:  # op.method is None
+            opTo_is_device = False
+
+        if not opFrom_is_device and not opTo_is_device:
+            # case: opFrom(host) --bridge--> opTo(host)
+            self._the_apply = self._apply_host
+        else:
+            # Have on device operators
+            if opFrom_is_device and not opTo_is_device:
+                # case: opFrom(GPU) --toHost--bridge--> opTo(host)
+                self._the_apply = self._apply_toHost_host
+            elif not opFrom_is_device and opTo_is_device:
+                # case: opFrom(host) --bridge--toDevice--> opTo(GPU)
+                self._the_apply = self._apply_host_toDevice
+            else:
+                # case: opFrom(GPU) --toHost--bridge--toDevice--> opTo(GPU)
+                # Transfers are removed if variables are batched
+                if np.any([self.opFrom.discreteFields[v].isBatch
+                           for v in self.variables] +
+                          [self.opTo.discreteFields[v].isBatch
+                           for v in self.variables]):
+                    self._the_apply = self._host
+                else:
+                    self._the_apply = self._apply_toHost_host_toDevice
+
+        # Build bridges and toTransfer lists
+        self.bridge = Bridge_intercomm(self.topology, self.parent_comm,
+                                       self.id_from, self.id_to,
+                                       self.proc_tasks)
+
+        for v in self.variables:
+            # toTransfer list completion
+            if self.proc_tasks[self._parent_rank] == self.id_from:
+                if opFrom_is_device:
+                    self._toHost_fields.append(self.opFrom.discreteFields[v])
+            if self.proc_tasks[self._parent_rank] == self.id_to:
+                if opTo_is_device:
+                    self._toDevice_fields.append(self.opTo.discreteFields[v])
+
+        for v in self.variables:
+            dv = v.discreteFields[self.topology]
+            transfers = self.bridge.transfers
+            # Set reception
+            if self.proc_tasks[self._parent_rank] == self.id_to:
+                for from_rk in transfers.keys():
+                    subshape = tuple(
+                        [transfers[from_rk][i][1] - transfers[from_rk][i][0]
+                         for i in range(self._dim)])
+                    substart = tuple(
+                        [transfers[from_rk][i][0] for i in range(self._dim)])
+                    self._r_types[v][from_rk] = \
+                        PARMES_MPI_REAL.Create_subarray(dv.data[0].shape,
+                                                        subshape,
+                                                        substart,
+                                                        order=ORDERMPI)
+                    self._r_types[v][from_rk].Commit()
+            # Set Sending
+            if self.proc_tasks[self._parent_rank] == self.id_from:
+                for to_rk in transfers.keys():
+                    subshape = tuple(
+                        [transfers[to_rk][i][1] - transfers[to_rk][i][0]
+                         for i in range(self._dim)])
+                    substart = tuple(
+                        [transfers[to_rk][i][0] for i in range(self._dim)])
+                    self._s_types[v][to_rk] = \
+                        PARMES_MPI_REAL.Create_subarray(dv.data[0].shape,
+                                                        subshape,
+                                                        substart,
+                                                        order=ORDERMPI)
+                    self._s_types[v][to_rk].Commit()
+        self._is_uptodate = True
+
+    @debug
+    def apply(self, simulation=None):
+        """
+        Apply this operator to its variables.
+        @param simulation : object that describes the simulation
+        parameters (time, time step, iteration number ...), see
+        parmepy.problem.simulation.Simulation for details.
+        """
+        for req in self.requirements:
+            req.wait()
+        self._the_apply(simulation)
+
+    def _apply_toHost_host_toDevice(self, simulation=None):
+        if __VERBOSE__:
+            print ("{0} APPLY toHOST+HOST+toDEVICE".format(self._parent_rank))
+        if self.proc_tasks[self._parent_rank] == self.id_from:
+            self._toHost()
+            self._wait_device()
+        self._host()
+        self._wait_host()
+        if self.proc_tasks[self._parent_rank] == self.id_to:
+            self._toDevice()
+            self._wait_device()
+
+    def _apply_toHost_host(self, simulation=None):
+
+        if __VERBOSE__:
+            print ("{0} APPLY toHOST+HOST".format(self._parent_rank))
+        if self.proc_tasks[self._parent_rank] == self.id_from:
+            self._toHost()
+            self._wait_device()
+        self._host()
+        self._wait_host()
+
+    def _apply_host_toDevice(self, simulation=None):
+        if __VERBOSE__:
+            print ("{0} APPLY HOST+toDEVICE".format(self._parent_rank))
+        self._host()
+        self._wait_host()
+        self.parent_comm.Barrier()
+        if self.proc_tasks[self._parent_rank] == self.id_to:
+            self._toDevice()
+            self._wait_device()
+        self.parent_comm.Barrier()
+
+    def _apply_host(self, simulation=None):
+        if __VERBOSE__:
+            print ("{0} APPLY HOST".format(self._parent_rank))
+        self._host()
+        self._wait_host()
+
+    def _host(self, simulation=None):
+        """
+        Proceed with data redistribution from opFrom to opTo
+        """
+        self.parent_comm.Barrier()
+        self.r_request = {}
+        self.s_request = {}
+        for v in self.variables:
+            dv = v.discreteFields[self.topology]
+            transfers = self.bridge.transfers
+            for d in self._range_components(v):
+                v_name = dv.name + S_DIR[d]
+                # Set reception
+                if self.proc_tasks[self._parent_rank] == self.id_to:
+                    for from_rk in transfers.keys():
+                        self.r_request[v_name + str(from_rk)] = \
+                            self.bridge.inter_comm.Irecv(
+                                [dv.data[d], 1, self._r_types[v][from_rk]],
+                                source=from_rk, tag=from_rk)
+                # Set Sending
+                if self.proc_tasks[self._parent_rank] == self.id_from:
+                    for to_rk in transfers.keys():
+                        self.s_request[v_name + str(to_rk)] = \
+                            self.bridge.inter_comm.Issend(
+                                [dv.data[d], 1, self._s_types[v][to_rk]],
+                                dest=to_rk, tag=self._my_rank)
+
+    def _toHost(self):
+        """
+        Proceed with data transfer of variables from device to host
+        """
+        if __VERBOSE__:
+            print ("{0} APPLY toHOST".format(self._parent_rank))
+        for v in self.variables:
+            dv = self.opFrom.discreteFields[v]
+            if dv in self._toHost_fields:
+                dv.toHost(self.component)
+
+    def _toDevice(self):
+        """
+        Proceed with data transfer of variables from host to device
+        """
+        if __VERBOSE__:
+            print ("{0} APPLY toDEVICE".format(self._parent_rank))
+        for v in self.variables:
+            dv = self.opTo.discreteFields[v]
+            if dv in self._toDevice_fields:
+                dv.toDevice(self.component)
+
+    def _wait_device(self):
+        if __VERBOSE__:
+            print ("{0} WAIT OPENCL".format(self._parent_rank))
+        for dv in self._toDevice_fields + self._toHost_fields:
+            dv.wait()
+
+    def _wait_host(self, simulation=None):
+        """Wait for requests completion."""
+        if __VERBOSE__:
+            print ("{0} WAIT MPI".format(self._parent_rank))
+        for rk in self.r_request:
+            self.r_request[rk].Wait()
+        for rk in self.s_request:
+            self.s_request[rk].Wait()
+        self.parent_comm.Barrier()
+        self.r_request = {}
+        self.s_request = {}
+
+    def test(self, rsend=None, rrecv=None):
+        """
+        If neither rsend nor rrecv is given, return True if all
+        communication requests are complete; otherwise test for the
+        completion of the send to rsend or of the receive from rrecv.
+        Process ranks should be given in the local communicator.
+        @param rsend : variable name + S_DIR + rank
+        @param rrecv : variable name + S_DIR + rank
+        """
+        if(rsend is not None or rrecv is not None):
+            send_res = True
+            recv_res = True
+            if rsend is not None:
+                send_res = self.s_request[rsend].Test()
+            if rrecv is not None:
+                recv_res = self.r_request[rrecv].Test()
+            res = send_res and recv_res
+        else:
+            res = True
+            for rk in self.r_request.keys():
+                res = self.r_request[rk].Test()
+                if not res:
+                    return res
+            for rk in self.s_request.keys():
+                res = self.s_request[rk].Test()
+                if not res:
+                    return res
+        return res
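
A minimal sketch of the communicator layout this operator expects, with
illustrative ranks and task ids (4 processes assumed; not part of the
patch):

    from mpi4py import MPI

    parent_comm = MPI.COMM_WORLD        # must contain every process
    rank = parent_comm.Get_rank()
    proc_tasks = [0, 0, 1, 1]           # task id of each process
    task_id = proc_tasks[rank]
    # Split the parent communicator into one intracommunicator per task.
    local_comm = parent_comm.Split(color=task_id, key=rank)
    # Remote leader: first parent rank belonging to the other task.
    remote_leader = proc_tasks.index(1 - task_id)
    inter_comm = local_comm.Create_intercomm(0, parent_comm, remote_leader)
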
diff --git a/HySoP/hysop/operator/__init__.py b/HySoP/hysop/operator/__init__.py
index 3fdf73ccb7942c7f8cf48d15c2192d80b3973bf9..0e4665b3ffe12a407e7ebbfdfe4bead873916239 100644
--- a/HySoP/hysop/operator/__init__.py
+++ b/HySoP/hysop/operator/__init__.py
@@ -8,7 +8,7 @@
 # To define and apply the advection of a scalar rho at velocity v:
 #\code
 # advec = parmepy.operator.advection(v, rho, ...)
-# advec.setUp()
+# advec.setup()
 # ...
 # advec.apply()
 # \endcode
diff --git a/HySoP/hysop/operator/__init__.pyc b/HySoP/hysop/operator/__init__.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cc4dfabe59d0885152a8b982ac2dcb0c70fd6d24
Binary files /dev/null and b/HySoP/hysop/operator/__init__.pyc differ
diff --git a/HySoP/hysop/operator/adapt_timestep.py b/HySoP/hysop/operator/adapt_timestep.py
index cefdd1c83c1b2bb895194ad556b980914889fd03..d81139cbe5acf5fc64527f677f57a446451ef9f7 100755
--- a/HySoP/hysop/operator/adapt_timestep.py
+++ b/HySoP/hysop/operator/adapt_timestep.py
@@ -10,12 +10,13 @@ from parmepy.methods_keys import TimeIntegrator, SpaceDiscretisation,\
     dtCrit
 from parmepy.numerics.finite_differences import FD_C_4
 from parmepy.operator.discrete.adapt_timestep import AdaptTimeStep_D
-from parmepy.operator.continuous import Operator
-from parmepy.fields.variable_parameter import VariableParameter
+from parmepy.operator.continuous import opsetup
+from parmepy.operator.computational import Computational
 import parmepy.default_methods as default
+from parmepy.mpi import main_comm, MPI
 
 
-class AdaptTimeStep(Operator):
+class AdaptTimeStep(Computational):
     """
     The adaptive time step is computed according
     to the following expression:
@@ -23,8 +24,9 @@ class AdaptTimeStep(Operator):
     """
 
     @debug
-    def __init__(self, velocity, vorticity, dt_adapt, io_params=None,
-                 time_range=None, lcfl=0.125, cfl=0.5, **kwds):
+    def __init__(self, velocity, vorticity, simulation,
+                 time_range=None, lcfl=0.125, cfl=0.5,
+                 maxdt=9999., **kwds):
         """
         Create a timeStep-evaluation operator from given
         velocity and vorticity variables.
@@ -41,29 +43,63 @@ class AdaptTimeStep(Operator):
         has no effect. Start/end are iteration numbers.
         Default = [2, endofsimu]
         """
+        assert 'variables' not in kwds, 'variables parameter is useless.'
         super(AdaptTimeStep, self).__init__(variables=[velocity, vorticity],
                                             **kwds)
         if self.method is None:
             self.method = default.ADAPT_TIME_STEP
+        assert SpaceDiscretisation in self.method.keys()
+        assert TimeIntegrator in self.method.keys()
+        if dtCrit not in self.method.keys():
+            self.method[dtCrit] = 'vort'
+
         ## velocity variable (vector)
         self.velocity = velocity
         ## vorticity variable (vector)
         self.vorticity = vorticity
         ## adaptive time step variable ("variable" object)
-        self.dt_adapt = dt_adapt
-        assert isinstance(self.dt_adapt, VariableParameter)
+        self.simulation = simulation
+        #assert isinstance(self.dt_adapt, VariableParameter)
         # Check if 'dt' key is present in dt_adapt dict
-        assert 'dt' in self.dt_adapt.data
-        self.io_params = io_params
-        assert SpaceDiscretisation in self.method.keys()
-        assert TimeIntegrator in self.method.keys()
-        if not dtCrit in self.method.keys():
-            self.method[dtCrit] = 'vort'
+        #assert 'dt' in self.dt_adapt.data
 
-        self.input = [self.velocity, self.vorticity]
+        self.input = self.variables
         self.output = [self.vorticity]
         self.time_range = time_range
-        self.lcfl, self.cfl = lcfl, cfl
+        self.lcfl, self.cfl, self.maxdt = lcfl, cfl, maxdt
+        self._intercomms = {}
+        self._set_inter_comm()
+
+    def _set_inter_comm(self):
+        """
+        Create intercommunicators, if required (i.e. if there are several
+        tasks defined in the domain).
+        """
+        task_is_source = self._mpis.task_id == self.domain.currentTask()
+        tasks_list = self.domain.tasks_list()
+        others = (v for v in tasks_list if v != self._mpis.task_id)
+        if task_is_source:
+            remote_leader = set([tasks_list.index(i) for i in others])
+        else:
+            remote_leader = set([tasks_list.index(self._mpis.task_id)])
+
+        for rk in remote_leader:
+            self._intercomms[rk] = self.domain.comm_task.Create_intercomm(
+                0, main_comm, rk)
+
+    def get_work_properties(self):
+        if not self._is_discretized:
+            msg = 'The operator must be discretized '
+            msg += 'before any call to this function.'
+            raise RuntimeError(msg)
+
+        vd = self.discreteFields[self.velocity]
+        shape_v = vd[0].shape
+        rwork_length = self.velocity.nbComponents ** 2
+        res = {'rwork': [], 'iwork': None}
+        for _ in xrange(rwork_length):
+            res['rwork'].append(shape_v)
+        return res
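# Editor's note (illustrative): a caller can preallocate the work arrays
# from these shapes before calling setup, e.g.:
#     props = op.get_work_properties()
#     rwork = [numpy.zeros(shape) for shape in props['rwork']]
#     op.setup(rwork=rwork)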
 
     def discretize(self):
         if self.method[SpaceDiscretisation] is FD_C_4:
@@ -74,15 +110,33 @@ class AdaptTimeStep(Operator):
         super(AdaptTimeStep, self)._standard_discretize(nbGhosts)
 
     @debug
-    def setUp(self):
-        self.discreteOperator =\
+    @opsetup
+    def setup(self, rwork=None, iwork=None):
+        self.discrete_op =\
             AdaptTimeStep_D(self.discreteFields[self.velocity],
                             self.discreteFields[self.vorticity],
-                            self.dt_adapt, method=self.method,
+                            self.simulation, method=self.method,
                             time_range=self.time_range,
-                            io_params=self.io_params,
                             lcfl=self.lcfl,
-                            cfl=self.cfl)
+                            cfl=self.cfl,
+                            maxdt=self.maxdt,
+                            rwork=rwork, iwork=iwork)
+        # Output setup
+        self._set_io('dt_adapt', (1, 7))
+        self.discrete_op.setWriter(self._writer)
+        self._is_uptodate = True
 
-        self.discreteOperator.setUp()
-        self._isUpToDate = True
+    def wait(self):
+        task_is_source = self._mpis.task_id == self.domain.currentTask()
+        rank = self._mpis.rank
+        dt = self.simulation.timeStep
+        for rk in self._intercomms:
+            if task_is_source:
+                # Local rank 0 broadcasts dt to the remote communicator
+                if rank == 0:
+                    self._intercomms[rk].bcast(dt, root=MPI.ROOT)
+                else:
+                    self._intercomms[rk].bcast(dt, root=MPI.PROC_NULL)
+            else:
+                dt = self._intercomms[rk].bcast(dt, root=0)
+                self.simulation.updateTimeStep(dt)
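
The wait() method above uses the standard intercommunicator broadcast
pattern: on the broadcasting side only the local root passes MPI.ROOT and
the other processes pass MPI.PROC_NULL, while the receiving group passes
the root's rank in the remote group. A minimal standalone sketch (function
name assumed):

    from mpi4py import MPI

    def share_dt(inter_comm, dt, task_is_source, local_rank):
        if task_is_source:
            # Only the local root provides the value to the remote group.
            root = MPI.ROOT if local_rank == 0 else MPI.PROC_NULL
            inter_comm.bcast(dt, root=root)
            return dt
        # Every process of the remote group receives from the source root.
        return inter_comm.bcast(None, root=0)
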
diff --git a/HySoP/hysop/operator/advection.py b/HySoP/hysop/operator/advection.py
index 2fed4e0a7fd10af3ee69ddf012a962ae19a7355f..9c443c29dd490a43fc0650b9f5b1cce3d15a0b16 100644
--- a/HySoP/hysop/operator/advection.py
+++ b/HySoP/hysop/operator/advection.py
@@ -3,18 +3,20 @@
 
 Advection of a field.
 """
-from parmepy.constants import debug, np, PARMES_INDEX, S_DIR
-from parmepy.operator.continuous import Operator
+from parmepy.constants import debug, S_DIR, ZDIR
+from parmepy.operator.computational import Computational
 from parmepy.methods_keys import Scales, TimeIntegrator, Interpolation,\
     Remesh, Support, Splitting, MultiScale
 from parmepy.numerics.remeshing import L2_1
-from parmepy.fields.continuous import Field
-from parmepy.operator.redistribute import Redistribute
-from parmepy.operator.advectionDir import AdvectionDir
+from parmepy.operator.continuous import opsetup, opapply
+from parmepy.operator.advection_dir import AdvectionDir
 import parmepy.default_methods as default
+from parmepy.tools.parameters import Discretization
+from parmepy.mpi.topology import Cartesian
+import parmepy.tools.numpywrappers as npw
 
 
-class Advection(Operator):
+class Advection(Computational):
     """
     Advection of a field,
     \f{eqnarray*}
@@ -47,315 +49,387 @@ class Advection(Operator):
     """
 
     @debug
-    def __init__(self, velocity, advectedFields, **kwds):
+    def __init__(self, velocity, advected_fields=None, **kwds):
         """
-        Create a Transport operator from given variables velocity and scalar.
-
-        @param velocity : velocity variable.
-        @param advectedFields : Advected fields (may be a list of Fields).
-        @param resolutions : list of resolutions (one per variable)
-        @param method : Method used
-        @param splittingConfig : Dimensional splitting configuration
-        (default 'o2')
-        @param topo : a predefined topology to discretize variables
+        Advection of a set of fields for a given velocity.
+
+        @param velocity : velocity field used for advection
+        @param advected_fields : the list of fields to be advected.
+        It may be a single field (no list).
         """
-        v = [velocity]
-        if isinstance(advectedFields, list):
-            self.advectedFields = advectedFields
+        ## Transport velocity
+        self.velocity = velocity
+        if 'variables' in kwds:
+            kw = kwds.copy()
+            kw['variables'] = kwds['variables'].copy()
+            # In that case, variables must contains only the advected fields
+            # with their discretization param.
+            # Velocity must always be given outside variables, with its
+            # own discretization.
+            assert advected_fields is None, 'too many input arguments.'
+            self.advected_fields = kwds['variables'].keys()
+            kw['variables'][self.velocity] = kwds['discretization']
+            kw.pop('discretization')
+            super(Advection, self).__init__(**kw)
+
         else:
-            self.advectedFields = [advectedFields]
-        [v.append(f) for f in self.advectedFields]
-        vars_str = "_("
-        for vv in self.advectedFields:
-            vars_str += vv.name + ","
-        super(Advection, self).__init__(variables=v, **kwds)
-        self.name += vars_str[0:-1] + ')'
+            v = [self.velocity]
+            if isinstance(advected_fields, list):
+                self.advected_fields = advected_fields
+            else:
+                self.advected_fields = [advected_fields]
+            v += self.advected_fields
+            super(Advection, self).__init__(variables=v, **kwds)
+
+        # Set default method, if required
         if self.method is None:
             self.method = default.ADVECTION
-        ## Transport velocity
-        self.velocity = velocity
-        ## Transported fields
-        self.output = self.advectedFields
+
+        self.output = self.advected_fields
         self.input = [var for var in self.variables]
-        self._isMultiScale = False
-        v_resol = self.resolutions[self.velocity]
-        if v_resol != self.resolutions[self.advectedFields[0]]:
-            self._isMultiScale = True
-        if self._isMultiScale and not MultiScale in self.method.keys():
-            print ("Using default mutiscale interpolation : L2_1")
-            self.method[MultiScale] = L2_1
+
+        vars_str = "_("
+        for vv in self.advected_fields:
+            vars_str += vv.name + ","
+        self.name += vars_str[0:-1] + ')'
+
         self.config = {}
+
+        # Find which solver is used for advection,
+        # among Scales, pure-python and GPU-like.
+        # Check also operator-splitting type.
+
+        # ---- Scales advection ----
         if Scales in self.method.keys():
-            self._isSCALES = True
-            if not Splitting in self.method.keys():
+            self._is_scales = True
+            if self.domain.dimension != 3:
+                raise ValueError("Scales Advection not implemented in 2D.")
+            # Default splitting = Strang
+            if Splitting not in self.method.keys():
                 self.method[Splitting] = 'strang'
+
+            self._my_setup = self._setup_scales
+            self.advec_dir = None
+
         else:
-            self._isSCALES = False
+            # ---- Python or GPU advection ----
+            self._is_scales = False
             assert TimeIntegrator in self.method.keys()
             assert Interpolation in self.method.keys()
             assert Remesh in self.method.keys()
             assert Support in self.method.keys()
-            if not Splitting in self.method.keys():
+            if Splitting not in self.method.keys():
                 self.method[Splitting] = 'o2'
+            dimension = self.domain.dimension
+            self.advec_dir = [None] * dimension
+            name = vars_str[0:-1] + ')'
+            if 'variables' in kwds:
+                for i in xrange(self.domain.dimension):
+                    self.advec_dir[i] = AdvectionDir(
+                        self.velocity, direction=i,
+                        name_suffix=name, **kwds)
+            else:
+                for i in xrange(self.domain.dimension):
+                    self.advec_dir[i] = AdvectionDir(
+                        self.velocity, direction=i,
+                        advected_fields=self.advected_fields,
+                        name_suffix=name, **kwds)
+
+            self._my_setup = self._setup_python
+            self.apply = self._apply_python
+
         self._old_dir = 0
         self.splitting = []
-        self._dim = self.velocity.dimension
-        self.advecDir = None
-        if not self._isSCALES:
-            particles_advectedFields = [
-                Field(adF.domain, name="Particle_AdvectedFields",
-                      isVector=adF.isVector)
-                for adF in self.advectedFields]
-            if self.method[Support].find('gpu_1k') >= 0:
-                particles_positions = None
-            else:
-                particles_positions = \
-                    Field(self.advectedFields[0].domain,
-                          name="Particle_Position",  isVector=False
-                          )
-
-            # Directional continuous Advection operators
-            self.advecDir = [None] * self._dim
-            for i in xrange(self._dim):
-                self.advecDir[i] = AdvectionDir(
-                    self.velocity, self.advectedFields, i,
-                    particles_advectedFields, particles_positions,
-                    isMultiScale=self._isMultiScale,
-                    name_suffix=vars_str[0:-1] + ')', **kwds)
-
-        # function to switch between CPU or GPU setup.
-        self._my_setUp = None
-    @debug
+
+    def scales_parameters(self):
+        """
+        Return the name of the particular method used in scales
+        and the type of splitting.
+        """
+        order = None
+        for o in ['p_O2', 'p_O4', 'p_L2',
+                  'p_M4', 'p_M6', 'p_M8',
+                  'p_44', 'p_64', 'p_66', 'p_84']:
+            if self.method[Scales].find(o) >= 0:
+                order = o
+        if order is None:
+            print ('Unknown advection method, falling back to default (p_M6).')
+            order = 'p_M6'
+
+        # - Extract splitting from self.method (default strang) -
+        splitting = 'strang'
+        for s in ['classic', 'strang', 'particle']:
+            if self.method[Splitting].find(s) >= 0:
+                splitting = s
+
+        return order, splitting
+
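# Editor's example (illustrative): with method = {Scales: 'p_66',
# Splitting: 'classic'}, scales_parameters() returns ('p_66', 'classic');
# unrecognized entries fall back to 'p_M6' and 'strang'.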
     def discretize(self):
         """
-        Discretisation according to the chosen method.
+        Discretisation (create topologies and discretize fields)
         Available methods :
         - 'scales' : SCALES fortran routines (3d only, list of vector
         and/or scalar)
-          - 'p_O2' : order 4 method, corrected to allow large CFL number,
-          untagged particles
-          - 'p_O4' : order 4 method, corrected to allow large CFL number,
-          untagged particles
-          - 'p_L2' : limited and corrected lambda 2
-          - 'p_M4' : Lambda_2,1 (=M'4) 4 point formula
-          - 'p_M6' (default) : Lambda_4,2 (=M'6) 6 point formula
-          - 'p_M8' : M8prime formula
-          - 'p_44' : Lambda_4,4 formula
-          - 'p_64' : Lambda_6,4 formula
-          - 'p_66' : Lambda_6,6 formula
-          - 'p_84' : Lambda_8,4 formula
         - 'gpu' : OpenCL kernels (2d and 3d, single field, scalar or vector)
-          - Kernels versions:
-            - '1k' : Single OpenCL kernel for advection and remeshing
-            - '2k' : Separate kernels
-          - Integration method:
-            - 'rk2' : Runge Kutta 2nd order advection
-            - 'rk4' : Runge Kutta 4th order advection
-          - remeshing formula:
-            - 'm4prime' : = 'l2_1'
-            - 'l2_1' : Labmda2,1 : (=M'4) 4 point formula, C1 regularity
-            - 'l2_2' : Labmda2,2 : 4 point formula, C2 regularity
-            - 'm6prime' : = 'l4_2'
-            - 'l4_2' : Labmda4,2 : (=M'6) 6 point formula, C2 regularity
-            - 'l4_3' : Labmda4,3 : 6 point formula, C3 regularity
-            - 'l4_4' : Labmda4,4 : 6 point formula, C4 regularity
-            - 'l6_3' : Labmda6,3 : 8 point formula, C3 regularity
-            - 'l6_4' : Labmda6,4 : 8 point formula, C4 regularity
-            - 'l6_5' : Labmda6,5 : 8 point formula, C5 regularity
-            - 'l6_6' : Labmda6,6 : 8 point formula, C6 regularity
-            - 'l8_4' : Labmda8,4 : 10 point formula, C4 regularity
-            - 'm8prime' : M8prime formula
         - other : Pure python (2d and 3d, list of vector and/or scalar)
-          - Integration method:
-            - 'rk2' : Runge Kutta 2nd order advection
-            - 'rk4' : Runge Kutta 4th order advection
-          - remeshing formula:
-            - 'm4prime' : = 'l2_1'
-            - 'l2_1' : Labmda2,1 : (=M'4) 4 point formula, C1 regularity
-            - 'l2_2' : Labmda2,2 : 4 point formula, C2 regularity
-            - 'm6prime' : = 'l4_2'
-            - 'l4_2' : Labmda4,2 : (=M'6) 6 point formula, C2 regularity
-            - 'l4_3' : Labmda4,3 : 6 point formula, C3 regularity
-            - 'l4_4' : Labmda4,4 : 6 point formula, C4 regularity
-            - 'l6_3' : Labmda6,3 : 8 point formula, C3 regularity
-            - 'l6_4' : Labmda6,4 : 8 point formula, C4 regularity
-            - 'l6_5' : Labmda6,5 : 8 point formula, C5 regularity
-            - 'l6_6' : Labmda6,6 : 8 point formula, C6 regularity
-            - 'l8_4' : Labmda8,4 : 10 point formula, C4 regularity
-            - 'm8prime' : M8prime formula
         """
-        # --- Advection solver from SCALES ---
-        if self._isSCALES:
-            if not self._dim == 3:
-                raise ValueError("Scales Advection not implemented in 2D.")
-            if self.ghosts is not None:
-                if (self.ghosts > 0).any():
-                    raise ValueError("Scales Advection not\
-                    yet implemented with ghosts points.")
-            # - Scales imports -
-            from parmepy.f2py import scales2py as scales
-
-            # - Extract order form self.method (default p_M6) -
-            order = None
-            for o in ['p_O2', 'p_O4', 'p_L2',
-                      'p_M4', 'p_M6', 'p_M8',
-                      'p_44', 'p_64', 'p_66', 'p_84']:
-                if self.method[Scales].find(o) >= 0:
-                    order = o
-            if order is None:
-                print ('Unknown advection method, turn to default (p_M6).')
-                order = 'p_M6'
-            # - Extract splitting form self.method (default strang) -
-            splitting = 'strang'
-            for s in ['classic', 'strang', 'particle']:
-                if self.method[Splitting].find(s) >= 0:
-                    splitting = s
-
-            # - Create the topologies (get param from scales) -
-            # Scales nbcells equals resolutions - 1
-            nbcells = np.asarray(self.resolutions[self.advectedFields[0]],
-                                 dtype=PARMES_INDEX) - 1
-            if self.topology is not None:
-                main_size = self.topology.size
-                comm = self.topology.comm
-                topodims = self.topology.shape
-            elif self._comm is not None:
-                main_size = self._comm.Get_size()
-                comm = self._comm
-                topodims = [1, 1, main_size]
-            else:
-                from parmepy.mpi.main_var import main_size
-                from parmepy.mpi.main_var import main_comm as comm
-                topodims = [1, 1, main_size]
-            scalesres, scalesoffset = \
-                scales.init_advection_solver(nbcells,
-                                             self.domain.length,
-                                             topodims, comm.py2f(),
-                                             order=order,
-                                             dim_split=splitting)
-            # Use same topodims as scales to create Cartesian topology
-            # in order to discretize our fields
-            if self.topology is not None:
-                assert (self.topology.shape == topodims).all(),\
-                    'input topology is not scales compliant.'
-                self._discretize_single_topo()
-            else:
+        if self._is_discretized:
+            return
+
+        if self._is_scales:
+            self._scales_discretize()
+        else:
+            self._no_scales_discretize()
+        advected_discrete_fields = [self.discreteFields[f]
+                                    for f in self.advected_fields]
+        toporef = advected_discrete_fields[0].topology
+        msg = 'All advected fields must have the same topology.'
+        for f in advected_discrete_fields:
+            assert f.topology == toporef, msg
+
+        if self._single_topo:
+            self.method[MultiScale] = None
+
+    @debug
+    def _scales_discretize(self):
+        """
+        Discretization (create topologies and discretize fields)
+        when using SCALES fortran routines (3d only, list of vector
+        and/or scalar)
+        - 'p_O2' : order 4 method, corrected to allow large CFL number,
+        untagged particles
+        - 'p_O4' : order 4 method, corrected to allow large CFL number,
+        untagged particles
+        - 'p_L2' : limited and corrected lambda 2
+        - 'p_M4' : Lambda_2,1 (=M'4) 4 point formula
+        - 'p_M6' (default) : Lambda_4,2 (=M'6) 6 point formula
+        - 'p_M8' : M8prime formula
+        - 'p_44' : Lambda_4,4 formula
+        - 'p_64' : Lambda_6,4 formula
+        - 'p_66' : Lambda_6,6 formula
+        - 'p_84' : Lambda_8,4 formula
+        """
+        assert self._is_scales
+        # - Extract order from self.method (default p_M6) -
+        order, splitting = self.scales_parameters()
+
+        # Check if topos need to be created
+        build_topos = self._check_variables()
+        from parmepy.f2py import scales2py as scales
+
+        # Scales, single resolution
+        if self._single_topo:
+            if build_topos:
+                # In that case, self._discretization must be
+                # a Discretization object, used for all fields.
+                # We use it to initialize scales solver
+                topo = self._create_scales_topo(self._discretization,
+                                                order, splitting)
                 for v in self.variables:
-                    # the topology for v ...
-                    topo = \
-                        self.domain.getOrCreateTopology(self._dim,
-                                                        self.resolutions[v],
-                                                        topodims,
-                                                        precomputed=True,
-                                                        offset=scalesoffset,
-                                                        localres=scalesres,
-                                                        ghosts=self.ghosts,
-                                                        comm=self._comm)
-                    # ... and the corresponding discrete field
-                    self.discreteFields[v] = v.discretize(topo)
-            if self._isMultiScale:
-                self.config['isMultiscale'] = self._isMultiScale
-                v_shape = np.asarray(self.resolutions[self.velocity],
-                                     dtype=PARMES_INDEX) - 1
-                scales.init_multiscale(v_shape[0], v_shape[1], v_shape[2],
-                                       self.method[MultiScale])
-            self._my_setUp = self.setUp_Scales
-
-        # --- GPU or pure-python advection ---
+                    self.variables[v] = topo
+            else:
+                # In that case, self._discretization must be
+                # a Cartesian object, used for all fields.
+                # We use it to initialize scales solver
+                assert isinstance(self._discretization, Cartesian)
+                topo = self._discretization
+                msg = 'input topology is not compliant with scales.'
+                #assert topo.dimension == 1, msg
+                msg = 'Ghost points not yet implemented for scales operators.'
+                assert (topo.mesh.discretization.ghosts == 0).all(), msg
+
+                nbcells = topo.mesh.discretization.resolution - 1
+                topodims = topo.shape
+                scalesres, global_start = \
+                    scales.init_advection_solver(nbcells,
+                                                 self.domain.length,
+                                                 npw.asintarray(topodims),
+                                                 self._mpis.comm.py2f(),
+                                                 order=order,
+                                                 dim_split=splitting)
+
+                assert (topo.shape == topodims).all()
+                assert (topo.mesh.resolution == scalesres).all()
+                assert (topo.mesh.start() == global_start).all()
+
+            msg = 'Scales Advection not yet implemented with ghost points.'
+            assert (topo.ghosts() == 0).all(), msg
+
+        # Scales, multi-resolution
         else:
-            for i in xrange(self._dim):
-                self.advecDir[i].discretize()
-            self.discreteFields = self.advecDir[0].discreteFields
-            self._my_setUp = self.setUp_Python
+            if build_topos[self.velocity]:
+                # Resolution used for velocity
+                v_resol = self.variables[self.velocity].resolution - 1
+
+            else:
+                topo = self.variables[self.velocity]
+                v_resol = topo.mesh.discretization.resolution
 
-    @staticmethod
-    def getWorkLengths(method=None, domain_dim=None):
+            vbuild = [v for v in self.variables if build_topos[v]]
+            for v in vbuild:
+                self.variables[v] = self._create_scales_topo(
+                    self.variables[v], order, splitting)
+
+            topo = self.variables.values()[0]
+            self._check_scales_topo(topo, order, splitting)
+
+            # Init multiscale in scales
+            scales.init_multiscale(v_resol[0], v_resol[1], v_resol[2],
+                                   self.method[MultiScale])
+
+        # All topos are built, we can discretize fields.
+        self._discretize_vars()
+
+    def _create_scales_topo(self, d3d, order, splitting):
+        from parmepy.f2py import scales2py as scales
+        comm = self._mpis.comm
+        topodims = [1, 1, comm.Get_size()]
+        msg = 'Wrong type for parameter discretization (at init): ' + \
+            str(self._discretization)
+        assert isinstance(d3d, Discretization), msg
+        nbcells = d3d.resolution - 1
+        scalesres, global_start = \
+            scales.init_advection_solver(nbcells,
+                                         self.domain.length,
+                                         npw.asintarray(topodims),
+                                         comm.py2f(),
+                                         order=order,
+                                         dim_split=splitting)
+        # Create the parmes topo (plane, cut through ZDIR)
+        return self.domain.create_plane_topology_from_mesh(
+            global_start=global_start, localres=scalesres,
+            discretization=d3d, cdir=ZDIR)
+
+    def _check_scales_topo(self, toporef, order, splitting):
+        from parmepy.f2py import scales2py as scales
+        # Re-run the scales solver initialization with the reference
+        # topology and check that every variable's topology matches it.
+        comm = self._mpis.comm
+        topodims = [1, 1, comm.Get_size()]
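+        # Reference layout for scales: a single cut along the last
+        # direction, as in _create_scales_topo.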
+        nbcells = toporef.mesh.discretization.resolution - 1
+
+        scalesres, global_start = \
+            scales.init_advection_solver(nbcells, self.domain.length,
+                                         npw.asintarray(toporef.shape),
+                                         comm.py2f(),
+                                         order=order, dim_split=splitting)
+        for v in self.variables:
+            topo = self.variables[v]
+            assert isinstance(topo, Cartesian), str(topo)
+            assert (topo.shape == topodims).all(), \
+                str(topo.shape) + ' != ' + str(topodims)
+            assert not self._single_topo or (topo.mesh.resolution == scalesres).all(), \
+                str(topo.mesh.resolution) + ' != ' + str(scalesres)
+            assert not self._single_topo or (topo.mesh.start() == global_start).all(), \
+                str(topo.mesh.start()) + ' != ' + str(global_start)
+
+    def _no_scales_discretize(self):
+        """
+        GPU or pure-python advection
+        """
+        if not self._is_discretized:
+            for i in xrange(self.domain.dimension):
+                self.advec_dir[i].discretize()
+            self.discreteFields = self.advec_dir[0].discreteFields
+            self._single_topo = self.advec_dir[0]._single_topo
+            self._is_discretized = True
+
+    def get_work_properties(self):
         """
         Return the length of working arrays lists required
-        for advction discrete operator, depending on :
-        - the time integrator (RK2, ...)
-        - the interpolation (which depends on domain dimension)
-        - the remeshing (which depends on domain dimension)
-        @param method : the dict of parameters for the operator.
-        Default = parmepy.default_methods.ADVECTION
+        for the discrete operator.
+        @return shapes, a dict of array shapes:
+        shapes['rwork'] == list of shapes for real work arrays,
+        shapes['iwork'] == list of shapes for int work arrays.
+        len(shapes[...]) gives the number of required arrays.
         """
-        if method is None:
-            method = default.ADVECTION
-        assert Interpolation in method,\
-            'An interpolation is required for the advection method.'
-        assert TimeIntegrator in method,\
-            'A time integrator is required for the advection method.'
-        assert Remesh in method,\
-            'A remesh is required for the advection method.'
-        tw = method[TimeIntegrator].getWorkLengths(1)
-        iw, iiw = method[Interpolation].getWorkLengths(domain_dim=domain_dim)
-        rw, riw = method[Remesh].getWorkLengths(domain_dim=domain_dim)
-        return max(tw + iw, rw), max(iiw, riw)
-
-    def setWorks(self, rwork=None, iwork=None):
-        if rwork is None:
-            rwork = []
-        if iwork is None:
-            iwork = []
-        if not self._isSCALES:
-            for i in xrange(self._dim):
-                self.advecDir[i].setWorks(rwork, iwork)
-
-    def setUp(self):
-        if not self._isUpToDate:
-            self._my_setUp()
-
-    def setUp_Scales(self):
-        advectedDiscreteFields = [self.discreteFields[f]
-                                  for f in self.advectedFields]
-        # - Create the discreteOperator from the
+        if self._is_scales:
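+            # Scales is assumed to manage its own (Fortran-side) work
+            # arrays, so nothing is requested here.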
+            return {'rwork': None, 'iwork': None}
+        else:
+            if not self.advec_dir[0]._is_discretized:
+                msg = 'The operator must be discretized '
+                msg += 'before any call to this function.'
+                raise RuntimeError(msg)
+            return self.advec_dir[0].get_work_properties()
+
+    @opsetup
+    def setup(self, rwork=None, iwork=None):
+        # Check resolutions to set multiscale case, if required.
+        if not self._single_topo and MultiScale not in self.method:
+            self.method[MultiScale] = L2_1
+        if not self._is_uptodate:
+            self._my_setup(rwork, iwork)
+
+    def _setup_scales(self, rwork=None, iwork=None):
+
+        advected_discrete_fields = [self.discreteFields[f]
+                                    for f in self.advected_fields]
+        # - Create the discrete_op from the
         # list of discrete fields -
         from parmepy.operator.discrete.scales_advection import \
             ScalesAdvection
-        self.discreteOperator = ScalesAdvection(
+        self.discrete_op = ScalesAdvection(
             self.discreteFields[self.velocity],
-            advectedDiscreteFields, method=self.method,
+            advected_discrete_fields, method=self.method,
+            rwork=rwork, iwork=iwork,
             **self.config)
+        self._is_uptodate = True
 
-        # -- Final set up --
-        self.discreteOperator.setUp()
-        self._isUpToDate = True
-
-    def setUp_Python(self):
-        for i in xrange(self._dim):
-            self.advecDir[i].setUp()
-        # If topologies differs between directions,
-        # one need Redistribute
-        # operators
-        main_size = self.advecDir[0].discreteFields[
-            self.velocity].topology.size
-        if main_size > 1:
-            # Build bridges
-            self.bridges = self._dim * [None]
-            if main_size > 1:
-                for dfrom in xrange(self.domain.dimension):
-                    self.bridges[dfrom] = self._dim * [None]
-                    for dto in xrange(self._dim):
-                        if dfrom == dto:
-                            self.bridges[dfrom][dto] = None
-                        else:
-                            nsuffix = str(dfrom) + '_' + str(dto)
-                            self.bridges[dfrom][dto] = Redistribute(
-                                variables=self.advectedFields,
-                                opFrom=self.advecDir[dfrom],
-                                opTo=self.advecDir[dto],
-                                name_suffix=nsuffix)
-                            self.bridges[dfrom][dto].setUp()
-
-        # Splitting configuration
+    def _setup_advec_dir(self, rwork=None, iwork=None):
+        """
+        Local allocation of work arrays,
+        common to advec_dir operators and setup for those
+        operators
+        """
+        wk_p = self.advec_dir[0].get_work_properties()
+        wk_length = len(wk_p['rwork'])
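+        # Allocate real work arrays if none were given; otherwise check
+        # that the provided arrays have the expected shapes.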
+        if rwork is None:
+            rwork = []
+            for i in xrange(wk_length):
+                memshape = wk_p['rwork'][i]
+                rwork.append(npw.zeros(memshape))
+        else:
+            assert len(rwork) == wk_length
+            for wk, refshape in zip(rwork, wk_p['rwork']):
+                assert wk.shape == refshape
+        wk_length = len(wk_p['iwork'])
+        if iwork is None:
+            iwork = []
+            for i in xrange(wk_length):
+                memshape = wk_p['iwork'][i]
+                iwork.append(npw.int_zeros(memshape))
+        else:
+            assert len(iwork) == wk_length
+            for wk, refshape in zip(iwork, wk_p['iwork']):
+                assert wk.shape == refshape
+        # Work arrays are common between all directions
+        # of advection.
+        for i in xrange(self.domain.dimension):
+            self.advec_dir[i].setup(rwork, iwork)
+
+    def _setup_python(self, rwork=None, iwork=None):
+
+        # setup for advection in each direction
+        self._setup_advec_dir(rwork, iwork)
+        # set splitting parameters (depends on method)
+        self._configure_splitting()
+
+        # configure gpu
+        if self.method[Support].find('gpu') >= 0:
+            self._configure_gpu()
+
+        self._is_uptodate = True
+
+    def _configure_splitting(self):
+        dimension = self.domain.dimension
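+        # self.splitting is a list of (direction, dt coefficient) pairs,
+        # executed in order by apply().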
         if self.method[Splitting] == 'o2_FullHalf':
             ## Half timestep in all directions
             [self.splitting.append((i, 0.5))
-             for i in xrange(self._dim)]
-            [self.splitting.append((self._dim - 1 - i, 0.5))
-             for i in xrange(self._dim)]
+             for i in xrange(dimension)]
+            [self.splitting.append((dimension - 1 - i, 0.5))
+             for i in xrange(dimension)]
         elif self.method[Splitting] == 'o1':
-            [self.splitting.append((i, 1.)) for i in xrange(self._dim)]
+            [self.splitting.append((i, 1.)) for i in xrange(dimension)]
         elif self.method[Splitting] == 'x_only':
             self.splitting.append((0, 1.))
         elif self.method[Splitting] == 'y_only':
@@ -365,86 +439,63 @@ class Advection(Operator):
         elif self.method[Splitting] == 'o2':
             ## Half timestep in all directions but last
             [self.splitting.append((i, 0.5))
-             for i in xrange(self._dim - 1)]
-            self.splitting.append((self._dim - 1, 1.))
-            [self.splitting.append((self._dim - 2 - i, 0.5))
-             for i in xrange(self._dim - 1)]
+             for i in xrange(dimension - 1)]
+            self.splitting.append((dimension - 1, 1.))
+            [self.splitting.append((dimension - 2 - i, 0.5))
+             for i in xrange(dimension - 1)]
         else:
             raise ValueError('Unknown splitting configuration:' +
                              self.method[Splitting])
 
-        if self.method[Support].find('gpu') >= 0:
-            splitting_nbSteps = len(self.splitting)
-            for d in xrange(self.domain.dimension):
-                dOp = self.advecDir[d].discreteOperator
-                assert len(dOp.exec_list) == splitting_nbSteps, \
-                    "Discrete operator execution " + \
-                    "list and splitting steps sizes must be equal " + \
-                    str(len(dOp.exec_list)) + " != " + \
-                    str(splitting_nbSteps)
-
-        if main_size > 1:
-            self.apply = self._apply_Comm
-        else:
-            self.apply = self._apply_noComm
-        self._isUpToDate = True
-
-        if self.method[Support].find('gpu') >= 0:
-            s = ""
-            device_id = self.advecDir[d].discreteOperator.cl_env._device_id
-            gpu_comm = self.advecDir[d].discreteOperator.cl_env.gpu_comm
-            gpu_rank = gpu_comm.Get_rank()
+    def _configure_gpu(self):
+        splitting_nbSteps = len(self.splitting)
+        for d in xrange(self.domain.dimension):
+            dOp = self.advec_dir[d].discrete_op
+            assert len(dOp.exec_list) == splitting_nbSteps, \
+                "Discrete operator execution " + \
+                "list and splitting steps sizes must be equal " + \
+                str(len(dOp.exec_list)) + " != " + \
+                str(splitting_nbSteps)
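+        # Gather a memory usage report over the GPU communicator;
+        # only rank 0 prints it.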
+        s = ""
+        device_id = self.advec_dir[0].discrete_op.cl_env._device_id
+        gpu_comm = self.advec_dir[0].discrete_op.cl_env.gpu_comm
+        gpu_rank = gpu_comm.Get_rank()
+        if gpu_rank == 0:
+            s += "=== OpenCL buffers allocated"
+            s += " on Device:{0} ===\n".format(device_id)
+            s += "Global memory used:\n"
+        total_gmem = 0
+        for d in xrange(self.domain.dimension):
+            g_mem_d = 0
+            # allocate all variables in advec_dir
+            for df in self.advec_dir[d].discrete_op.variables:
+                if not df.gpu_allocated:
+                    df.allocate()
+                    g_mem_df = gpu_comm.allreduce(df.mem_size)
+                    g_mem_d += g_mem_df
             if gpu_rank == 0:
-                s += "=== OpenCL buffers allocated"
-                s += " on Device:{0} ===\n".format(device_id)
-                s += "Global memory used:\n"
-            total_gmem = 0
-            for d in xrange(self.domain.dimension):
-                g_mem_d = 0
-                for df in self.advecDir[d].discreteOperator.variables:
-                    if not df.gpu_allocated:
-                        df.allocate()
-                        g_mem_df = gpu_comm.allreduce(df.mem_size)
-                        g_mem_d += g_mem_df
-                if gpu_rank == 0:
-                    s += " Advection" + S_DIR[d] + ": {0:9d}".format(g_mem_d)
-                    s += "Bytes ({0:5d} MB)\n".format(g_mem_d / (1024 ** 2))
-                total_gmem += g_mem_d
+                s += " Advection" + S_DIR[d] + ": {0:9d}".format(g_mem_d)
+                s += "Bytes ({0:5d} MB)\n".format(g_mem_d / (1024 ** 2))
+            total_gmem += g_mem_d
+        if gpu_rank == 0:
+            s += " Total      : {0:9d}".format(total_gmem)
+            s += "Bytes ({0:5d} MB)\n".format(total_gmem / (1024 ** 2))
+            s += "Local memory used:\n"
+        total_lmem = 0
+        for d in xrange(self.domain.dimension):
+            l_mem_d = gpu_comm.allreduce(
+                self.advec_dir[d].discrete_op.size_local_alloc)
             if gpu_rank == 0:
-                s += " Total      : {0:9d}".format(total_gmem)
-                s += "Bytes ({0:5d} MB)\n".format(total_gmem / (1024 ** 2))
-                s += "Local memory used:\n"
-            total_lmem = 0
-            for d in xrange(self.domain.dimension):
-                l_mem_d = gpu_comm.allreduce(
-                    self.advecDir[d].discreteOperator.size_local_alloc)
-                if gpu_rank == 0:
-                    s += " Advection" + S_DIR[d] + ": {0:9d}".format(l_mem_d)
-                    s += "Bytes ({0:5d} MB)\n".format(l_mem_d / (1024 ** 2))
-                total_lmem += l_mem_d
-            if gpu_rank == 0:
-                s += " Total      : {0:9d}".format(total_lmem) + "Bytes"
-                print (s)
-
-    @debug
-    def _apply_noComm(self, simulation=None):
-        """
-        Apply this operator to its variables.
-        @param simulation : object that describes the simulation
-        parameters (time, time step, iteration number ...), see
-        parmepy.problem.simulation.Simulation for details.
-
-        Redefinition for advection. Applying a dimensional splitting.
-        """
-        for req in self.requirements:
-            req.wait()
-        for split_id, split in enumerate(self.splitting):
-            self.advecDir[split[0]].apply(
-                simulation, split[1], split_id, self._old_dir)
-            self._old_dir = split[0]
+                s += " Advection" + S_DIR[d] + ": {0:9d}".format(l_mem_d)
+                s += "Bytes ({0:5d} MB)\n".format(l_mem_d / (1024 ** 2))
+            total_lmem += l_mem_d
+        if gpu_rank == 0:
+            s += " Total      : {0:9d}".format(total_lmem) + "Bytes"
+            print s
 
     @debug
-    def _apply_Comm(self, simulation=None):
+    @opapply
+    def _apply_python(self, simulation=None):
         """
         Apply this operator to its variables.
         @param simulation : object that describes the simulation
@@ -453,14 +504,9 @@ class Advection(Operator):
 
         Redefinition for advection. Applying a dimensional splitting.
         """
-        for req in self.requirements:
-            req.wait()
+        assert simulation is not None
         for split_id, split in enumerate(self.splitting):
-            # Calling the redistribute operators between directions
-            if not self._old_dir == split[0]:
-                self.bridges[self._old_dir][split[0]].apply()
-                self.bridges[self._old_dir][split[0]].wait()
-            self.advecDir[split[0]].apply(
+            self.advec_dir[split[0]].apply(
                 simulation, split[1], split_id, self._old_dir)
             self._old_dir = split[0]
 
@@ -469,24 +515,31 @@ class Advection(Operator):
         """
         Memory cleaning.
         """
-        if self._isSCALES:
-            Operator.finalize(self)
+        if self._is_scales:
+            Computational.finalize(self)
         else:
-            for dop in self.advecDir:
+            for dop in self.advec_dir:
                 dop.finalize()
-                self.timer = self.timer + dop.timer
+
+    def get_profiling_info(self):
+        if self._is_uptodate:
+            if self._is_scales:
+                self.profiler += self.discrete_op.profiler
+            else:
+                for dop in self.advec_dir:
+                    self.profiler += dop.profiler
 
     def __str__(self):
         """
         Common printings for operators
         """
         shortName = str(self.__class__).rpartition('.')[-1][0:-2]
-        if self._isSCALES:
-            s = Operator.__str__(self)
+        if self._is_scales:
+            s = super(Advection, self).__str__()
         else:
-            for i in xrange(self._dim):
-                if self.advecDir[i].discreteOperator is not None:
-                    s = str(self.advecDir[i].discreteOperator)
+            for i in xrange(self.domain.dimension):
+                if self.advec_dir[i].discrete_op is not None:
+                    s = str(self.advec_dir[i].discrete_op)
                 else:
                     s = shortName + " operator. Not discretised."
         return s + "\n"
diff --git a/HySoP/hysop/operator/advection_dir.py b/HySoP/hysop/operator/advection_dir.py
index 73ad4a20d28da2df56ef5c0eaf11a6ab8fc8f54b..d08deff01e0fbdf7613932892e2cd39c28b3ca90 100644
--- a/HySoP/hysop/operator/advection_dir.py
+++ b/HySoP/hysop/operator/advection_dir.py
@@ -3,18 +3,18 @@
 
 Advection of a field in a single direction.
 """
-from parmepy.constants import debug, np, S_DIR, PARMES_INDEX
-from parmepy.methods_keys import TimeIntegrator, Interpolation, Remesh, \
-    Support, MultiScale
-from parmepy.numerics.interpolation import Linear
-from parmepy.numerics.remeshing import L2_1, L4_2, L4_4
-from parmepy.operator.continuous import Operator
-from parmepy.tools.timers import Timer
+from parmepy.constants import debug, S_DIR
+from parmepy.methods_keys import Support, MultiScale, \
+    TimeIntegrator, Interpolation, Remesh
+from parmepy.numerics.remeshing import L2_1, L4_2, L4_4, Remeshing, Linear
+from parmepy.operator.computational import Computational
 # To get default method values for advection:
 import parmepy.default_methods as default
+import numpy as np
+from parmepy.operator.continuous import opsetup, opapply
 
 
-class AdvectionDir(Operator):
+class AdvectionDir(Computational):
     """
     Advection of a field,
     \f{eqnarray*}
@@ -29,172 +29,215 @@ class AdvectionDir(Operator):
     """
 
     @debug
-    def __init__(self, velocity, advectedFields, d, particle_fields,
-                 particle_positions, name_suffix='',
-                 isMultiScale=False, **kwds):
-
-        v = [velocity]
-        if isinstance(advectedFields, list):
-            advectedFields = advectedFields
+    def __init__(self, velocity, direction, advected_fields=None,
+                 name_suffix='', cutdir=None, **kwds):
+        ## Get the other arguments to pass to the discrete operators
+        self._kwds = kwds.copy()
+        self._kwds.pop('discretization')
+        ## Transport velocity
+        self.velocity = velocity
+        if 'variables' in kwds:
+            self._kwds.pop('variables')
+            kw = kwds.copy()
+            kw['variables'] = kwds['variables'].copy()
+            # In that case, variables must contains only the advected fields
+            # with their discretization param.
+            # Velocity must always be given outside variables, with its
+            # own discretization.
+            assert advected_fields is None, 'too many input arguments.'
+            self.advected_fields = kwds['variables'].keys()
+            kw['variables'][self.velocity] = kwds['discretization']
+            kw.pop('discretization')
+            super(AdvectionDir, self).__init__(**kw)
         else:
-            advectedFields = [advectedFields]
-        [v.append(f) for f in advectedFields]
-        super(AdvectionDir, self).__init__(variables=v, **kwds)
+            v = [self.velocity]
+            if isinstance(advected_fields, list):
+                self.advected_fields = advected_fields
+            else:
+                self.advected_fields = [advected_fields]
+            v += self.advected_fields
+            super(AdvectionDir, self).__init__(variables=v, **kwds)
+
+        # Set default method, if required
         if self.method is None:
             self.method = default.ADVECTION
-        if isMultiScale and not MultiScale in self.method.keys():
-            print ("Using default mutiscale interpolation : L2_1")
-            self.method[MultiScale] = L2_1
+        self.output = self.advected_fields
+        self.input = [var for var in self.variables]
+
         from parmepy.methods_keys import Splitting
-        if not Splitting in self.method.keys():
+        if Splitting not in self.method.keys():
             self.method[Splitting] = 'o2'
-        self.config = {}
-        self.name += name_suffix + S_DIR[d]
-        if self.method is None:
-            self.method = default.ADVECTION
-        if self.topology is not None:
-            raise ValueError("User defined topology is not\
-               allowed for advecDir.")
+        self.name += name_suffix + S_DIR[direction]
 
-        self.output = advectedFields
-        self.input = [var for var in self.variables]
-        ## Transport velocity
-        self.velocity = velocity
         ## direction to advect
-        self.dir = d
-        self._isMultiScale = isMultiScale
-        self._v_ghosts = None
-        if self._isMultiScale:
-            if self.method[Support].find('gpu') < 0:
-                raise ValueError("Multiscale advection is not supported in "
-                                 "Python yet, user should use Scales or GPU.")
-            if not MultiScale in self.method.keys():
-                self.method[MultiScale] = L2_1
-            if self.method[MultiScale] == Linear:
-                self._v_ghosts = np.array([1, ] * self.domain.dimension,
-                                          dtype=PARMES_INDEX)
-            elif self.method[MultiScale] == L2_1:
-                self._v_ghosts = np.array([2, ] * self.domain.dimension,
-                                          dtype=PARMES_INDEX)
-            elif self.method[MultiScale] == L4_2 or \
-                    self.method[MultiScale] == L4_4:
-                self._v_ghosts = np.array([3, ] * self.domain.dimension,
-                                          dtype=PARMES_INDEX)
-            else:
-                raise ValueError("Unknown multiscale method")
+        self.direction = direction
+
+        ## Fields on particles
+        self.particle_fields = None
+
+        ## Positions of the particles
+        self.particle_positions = None
 
-        self.particle_fields = particle_fields
-        self.particle_positions = particle_positions
+        self._default_cutdir = cutdir
 
     @debug
     def discretize(self):
+        if self._is_discretized:
+            return
+
+        build_topos = self._check_variables()
+
+        # Check if multiscale is available
+        if not self._single_topo:
+            if self.method[Support].find('gpu') < 0:
+                raise ValueError("Multiscale advection is not yet supported "
+                                 "in pure Python, use Scales or GPU.")
+
+        ## Topology cutdir for parallel advection
+        if self._default_cutdir is None:
+            cutdir = [False] * self.domain.dimension
+            cutdir[-1] = True
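+            # Default: distribute MPI processes along the last
+            # direction only.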
+        else:
+            cutdir = self._default_cutdir
+
+        if self._single_topo:
+            # One topo for all fields ...
+            self.method[MultiScale] = None
+            if build_topos:
+                topo = self.domain.create_topology(
+                    discretization=self._discretization, cutdir=cutdir)
+                for v in self.variables:
+                    self.variables[v] = topo
+            else:
+                # Topo is already built, just check that its cutdir matches.
+                topo = self.variables.values()[0]
+                msg = str(topo.cutdir) + ' != ' + str(cutdir)
+                assert (topo.cutdir == cutdir).all(), msg
+
+        else:
+            # ... or one topo for each field.
+            for v in self.variables:
+                if build_topos[v]:
+                    topo = self.domain.create_topology(
+                        discretization=self.variables[v], cutdir=cutdir)
+                    self.variables[v] = topo
+                    build_topos[v] = False
+            # compute velocity minimal ghost layer size
+            self._check_ghost_layer(build_topos)
+
+        # All topos are built, we can discretize fields.
+        self._discretize_vars()
+
+        self._is_discretized = True
+
+    def _check_ghost_layer(self, build_topos):
         """
-        Discretisation according to the chosen method.
-        Available methods : See Advection.setUp
+        Only meaningful if fields have different resolutions.
+        Check/set interpolation method for multiscale and
+        set ghost layer size, if required.
         """
-        if self._comm is not None:
-            main_size = self._comm.Get_size()
+        # Set method to default if unknown
+        if MultiScale not in self.method.keys():
+            self.method[MultiScale] = L2_1
+
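+        # Minimal velocity ghost layer required by each multiscale
+        # interpolation stencil.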
+        mscale = self.method[MultiScale]
+        if mscale == Linear:
+            min_ghosts = 1
+        elif mscale == L2_1:
+            min_ghosts = 2
+        elif mscale == L4_2 or mscale == L4_4:
+            min_ghosts = 3
         else:
-            from parmepy.mpi import main_size
+            raise ValueError("Unknown multiscale method")
 
-        topodims = np.ones((self.domain.dimension))
-        # MPI topology depends on direction
-        if self.dir == self.domain.dimension - 1:
-            # Cut in first dir for last dir computations
-            topodims[0] = main_size
+        # Topo or resolution associated with velocity
+        discr_v = self.variables[self.velocity]
+        if build_topos[self.velocity]:
+            # discr_v = Discretization
+            ghosts_v = discr_v.ghosts
         else:
-            # Cut in last dir
-            topodims[-1] = main_size
-        for v in self.variables:
-            if v == self.velocity:
-                topo = self.domain.getOrCreateTopology(
-                    1, self.resolutions[v], topoResolution=topodims,
-                    fixedResolution=True, ghosts=self._v_ghosts,
-                    comm=self._comm)
+            # discr_v = Cartesian
+            ghosts_v = discr_v.ghosts()
+        msg = 'Ghost layer required for velocity. Size min = '
+        msg += str(min_ghosts) + " (" + str(ghosts_v) + " given)"
+        assert (ghosts_v >= min_ghosts).all(), msg
+
+    @opsetup
+    def setup(self, rwork=None, iwork=None):
+        # select discretization of the advected fields
+        advected_discrete_fields = [self.discreteFields[v]
+                                    for v in self.variables
+                                    if v is not self.velocity]
+        # GPU advection ...
+        comm_size = advected_discrete_fields[0].topology.comm.Get_size()
+        if self.method[Support].find('gpu') >= 0:
+            topo_shape = advected_discrete_fields[0].topology.shape
+            if topo_shape[self.direction] == 1:
+                from parmepy.gpu.gpu_particle_advection \
+                    import GPUParticleAdvection as advec
             else:
-                topo = self.domain.getOrCreateTopology(
-                    1, self.resolutions[v], topoResolution=topodims,
-                    fixedResolution=True, ghosts=self.ghosts,
-                    comm=self._comm)
-            self.discreteFields[v] = v.discretize(topo)
-
-    @staticmethod
-    def getWorkLengths(method=None, domain_dim=None):
-        """
-        Return the length of working arrays lists required
-        for advction discrete operator, depending on :
-        - the time integrator (RK2, ...)
-        - the interpolation (which depends on domain dimension)
-        - the remeshing (which depends on domain dimension)
-        @param method : the dict of parameters for the operator.
-        Default = parmepy.default_methods.ADVECTION
-        """
-        if method is None:
-            method = default.ADVECTION
-        assert Interpolation in method,\
-            'An interpolation is required for the advection method.'
-        assert TimeIntegrator in method,\
-            'A time integrator is required for the advection method.'
-        assert Remesh in method,\
-            'A remesh is required for the advection method.'
-        tw = method[TimeIntegrator].getWorkLengths(1)
-        iw, iiw = method[Interpolation].getWorkLengths(domain_dim=domain_dim)
-        rw, riw = method[Remesh].getWorkLengths(domain_dim=domain_dim)
-        return max(tw + iw, rw), max(iiw, riw)
-
-    def setWorks(self, rwork=None, iwork=None):
-        if rwork is None:
-            rwork = []
-        if iwork is None:
-            iwork = []
-        self.discreteOperator.setWorks(rwork, iwork)
-
-    def setUp(self):
-        advectedDiscreteFields = [self.discreteFields[v]
-                                  for v in self.variables
-                                  if not v is self.velocity]
-        topo = advectedDiscreteFields[0].topology
-        particles_advectedDiscreteFields = [v.discretize(topo)
-                                            for v in self.particle_fields]
-        particles_positionsDiscreteField = None
-        if self.particle_positions is not None:
-            particles_positionsDiscreteField =\
-                self.particle_positions.discretize(topo)
-
-        if self.method[Support].find('gpu_2k') >= 0:
-            from parmepy.gpu.gpu_particle_advection_2k \
-                import GPUParticleAdvection2k as advec
-        elif self.method[Support].find('gpu_1k') >= 0:
-            from parmepy.gpu.gpu_particle_advection_1k \
-                import GPUParticleAdvection1k as advec
+                from parmepy.gpu.multi_gpu_particle_advection \
+                    import MultiGPUParticleAdvection as advec
         else:
+            # pure-python advection
             from parmepy.operator.discrete.particle_advection \
                 import ParticleAdvection as advec
 
-        self.discreteOperator = advec(
-            self.discreteFields[self.velocity],
-            advectedDiscreteFields, self.dir, method=self.method,
-            part_position=particles_positionsDiscreteField,
-            part_advectedFields=particles_advectedDiscreteFields,
-            isMultiScale=self._isMultiScale,
-            **self.config)
+        self.discrete_op = advec(
+            velocity=self.discreteFields[self.velocity],
+            fields_on_grid=advected_discrete_fields,
+            direction=self.direction,
+            rwork=rwork, iwork=iwork,
+            **self._kwds)
 
-        # -- Final set up --
-        self.discreteOperator.setUp()
-        self._isUpToDate = True
+        self._is_uptodate = True
 
-    @debug
-    def apply(self, simulation, dtCoeff, split_id, old_dir=None):
-        for req in self.requirements:
-            req.wait()
-        self.discreteOperator.apply(
-            simulation, dtCoeff, split_id, old_dir)
+    def get_work_properties(self):
+        """
+        Work arrays for advection in one direction:
+
+        [interp, part_positions, fields_on_particles]
+        The interp part is also used for remeshing and time integration.
+        """
+        if not self._is_discretized:
+            msg = 'The operator must be discretized '
+            msg += 'before any call to this function.'
+            raise RuntimeError(msg)
+        dimension = self.domain.dimension
+        res = {'rwork': [], 'iwork': []}
+        if self.method[Support].find('gpu') < 0:
+            tiw = self.method[TimeIntegrator].getWorkLengths(1)
+            iw, iiw = \
+                self.method[Interpolation].getWorkLengths(domain_dim=dimension)
+            rw, riw = Remeshing.getWorkLengths(domain_dim=dimension)
+            iwl = max(iiw, riw)
+            rw = max(tiw + iw, rw)
+        else:
+            # For GPU version, no need of numerics works
+            iwl, rw = 0, 0
+        # Shape of reference comes from fields, not from velocity
+        advected_discrete_fields = [self.discreteFields[v]
+                                    for v in self.variables
+                                    if v is not self.velocity]
+        memshape = advected_discrete_fields[0].topology.mesh.resolution
+        rw += np.sum([f.nbComponents for f in self.advected_fields])
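+        # One extra real array for particle positions, except for the
+        # single-kernel GPU version ('gpu_1k'), which does not store them.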
+        if self.method[Support].find('gpu') < 0 or \
+           self.method[Support].find('gpu_2k') >= 0:
+            rw += 1  # positions
+        for i in xrange(rw):
+            res['rwork'].append(memshape)
+        for i in xrange(iwl):
+            res['iwork'].append(memshape)
+        return res
 
     @debug
-    def finalize(self):
+    @opapply
+    def apply(self, simulation=None, dtCoeff=1.0, split_id=0, old_dir=0):
         """
-        Memory cleaning.
+        Apply this operator to its variables.
+        @param simulation : object that describes the simulation
+        parameters (time, time step, iteration number ...), see
+        parmepy.problem.simulation.Simulation for details.
         """
-        self.timer = Timer(self, suffix=S_DIR[self.dir])
-        self.discreteOperator.finalize()
-        self.timer = self.timer + self.discreteOperator.timer
+        self.discrete_op.apply(simulation, dtCoeff, split_id, old_dir)
diff --git a/HySoP/hysop/operator/advold.py b/HySoP/hysop/operator/advold.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a8e9ecf8aed2c842eb53c8cd3a75e3c1f61cd40
--- /dev/null
+++ b/HySoP/hysop/operator/advold.py
@@ -0,0 +1,519 @@
+"""
+@file advold.py
+
+Advection of a field.
+"""
+from parmepy.constants import debug, np, PARMES_INDEX, S_DIR
+from parmepy.operator.computational import Computational
+from parmepy.methods_keys import Scales, TimeIntegrator, Interpolation,\
+    Remesh, Support, Splitting, MultiScale
+from parmepy.numerics.remeshing import L2_1
+from parmepy.fields.continuous import Field
+from parmepy.operator.redistribute import Redistribute
+from parmepy.operator.advectionDir import AdvectionDir
+import parmepy.default_methods as default
+import parmepy.tools.numpywrappers as npw
+
+
+class Advection(Computational):
+    """
+    Advection of a field,
+    \f{eqnarray*}
+    X = Op(X,velocity)
+    \f} for :
+    \f{eqnarray*}
+    \frac{\partial{X}}{\partial{t}} + velocity.\nabla X = 0
+    \f}
+    Note : we assume incompressible flow.
+
+    Computations are performed within a dimensional splitting as follows:
+      - 2nd order:
+        - X-dir, half time step
+        - Y-dir, half time step
+        - Z-dir, full time step
+        - Y-dir, half time step
+        - X-dir, half time step
+      - 2nd order full half-steps:
+        - X-dir, half time step
+        - Y-dir, half time step
+        - Z-dir, half time step
+        - Z-dir, half time step
+        - Y-dir, half time step
+        - X-dir, half time step
+      - 1st order:
+        - X-dir, half time step
+        - Y-dir, half time step
+        - Z-dir, half time step
+
+    """
+
+    @debug
+    def __init__(self, velocity, advectedFields, **kwds):
+        """
+        Create a Transport operator from given variables velocity and scalar.
+
+        @param velocity : velocity variable.
+        @param advectedFields : Advected fields (may be a list of Fields).
+        @param resolutions : list of resolutions (one per variable)
+        @param method : Method used
+        @param splittingConfig : Dimensional splitting configuration
+        (default 'o2')
+        @param topo : a predefined topology to discretize variables
+        """
+        v = [velocity]
+        if isinstance(advectedFields, list):
+            self.advectedFields = advectedFields
+        else:
+            self.advectedFields = [advectedFields]
+        v += self.advectedFields
+        assert 'variables' not in kwds, 'variables parameter is useless.'
+        super(Advection, self).__init__(variables=v, **kwds)
+
+        vars_str = "_("
+        for vv in self.advectedFields:
+            vars_str += vv.name + ","
+        self.name += vars_str[0:-1] + ')'
+
+        if self.method is None:
+            self.method = default.ADVECTION
+
+        ## Transport velocity
+        self.velocity = velocity
+        ## Transported fields
+        self.output = self.advectedFields
+        self.input = [var for var in self.variables]
+
+        self.config = {}
+
+        # Find which solver is used for advection,
+        # among Scales, pure-python and GPU-like.
+        # Check also operator-splitting type.
+        if Scales in self.method.keys():
+            self._isSCALES = True
+            # Default splitting = Strang
+            if Splitting not in self.method.keys():
+                self.method[Splitting] = 'strang'
+        else:
+            self._isSCALES = False
+            assert TimeIntegrator in self.method.keys()
+            assert Interpolation in self.method.keys()
+            assert Remesh in self.method.keys()
+            assert Support in self.method.keys()
+            if Splitting not in self.method.keys():
+                self.method[Splitting] = 'o2'
+        self._old_dir = 0
+        self.splitting = []
+        self._dim = self.velocity.dimension
+        ## Multiscale flag, updated during setup if resolutions differ
+        self._isMultiScale = False
+        self.advecDir = None
+        if not self._isSCALES:
+            particles_advectedFields = [
+                Field(adF.domain, name="Particle_AdvectedFields",
+                      isVector=adF.isVector)
+                for adF in self.advectedFields]
+            if self.method[Support].find('gpu_1k') >= 0:
+                particles_positions = None
+            else:
+                particles_positions = \
+                    Field(self.advectedFields[0].domain,
+                          name="Particle_Position",  isVector=False
+                          )
+
+            # Directional continuous Advection operators
+            self.advecDir = [None] * self._dim
+            for i in xrange(self._dim):
+                self.advecDir[i] = AdvectionDir(
+                    self.velocity, self.advectedFields, i,
+                    particles_advectedFields, particles_positions,
+                    isMultiScale=self._isMultiScale,
+                    name_suffix=vars_str[0:-1] + ')', **kwds)
+
+        # function to switch between CPU or GPU setup.
+        self._my_setup = None
+
+    @debug
+    def discretize(self):
+        """
+        Discretisation according to the chosen method.
+        Available methods :
+        - 'scales' : SCALES fortran routines (3d only, list of vector
+        and/or scalar)
+          - 'p_O2' : order 2 method, corrected to allow large CFL number,
+          untagged particles
+          - 'p_O4' : order 4 method, corrected to allow large CFL number,
+          untagged particles
+          - 'p_L2' : limited and corrected lambda 2
+          - 'p_M4' : Lambda_2,1 (=M'4) 4 point formula
+          - 'p_M6' (default) : Lambda_4,2 (=M'6) 6 point formula
+          - 'p_M8' : M8prime formula
+          - 'p_44' : Lambda_4,4 formula
+          - 'p_64' : Lambda_6,4 formula
+          - 'p_66' : Lambda_6,6 formula
+          - 'p_84' : Lambda_8,4 formula
+        - 'gpu' : OpenCL kernels (2d and 3d, single field, scalar or vector)
+          - Kernels versions:
+            - '1k' : Single OpenCL kernel for advection and remeshing
+            - '2k' : Separate kernels
+          - Integration method:
+            - 'rk2' : Runge Kutta 2nd order advection
+            - 'rk4' : Runge Kutta 4th order advection
+          - remeshing formula:
+            - 'm4prime' : = 'l2_1'
+            - 'l2_1' : Lambda2,1 : (=M'4) 4 point formula, C1 regularity
+            - 'l2_2' : Lambda2,2 : 4 point formula, C2 regularity
+            - 'm6prime' : = 'l4_2'
+            - 'l4_2' : Lambda4,2 : (=M'6) 6 point formula, C2 regularity
+            - 'l4_3' : Lambda4,3 : 6 point formula, C3 regularity
+            - 'l4_4' : Lambda4,4 : 6 point formula, C4 regularity
+            - 'l6_3' : Lambda6,3 : 8 point formula, C3 regularity
+            - 'l6_4' : Lambda6,4 : 8 point formula, C4 regularity
+            - 'l6_5' : Lambda6,5 : 8 point formula, C5 regularity
+            - 'l6_6' : Lambda6,6 : 8 point formula, C6 regularity
+            - 'l8_4' : Lambda8,4 : 10 point formula, C4 regularity
+            - 'm8prime' : M8prime formula
+        - other : Pure python (2d and 3d, list of vector and/or scalar)
+          - Integration method:
+            - 'rk2' : Runge Kutta 2nd order advection
+            - 'rk4' : Runge Kutta 4th order advection
+          - remeshing formula:
+            - 'm4prime' : = 'l2_1'
+            - 'l2_1' : Lambda2,1 : (=M'4) 4 point formula, C1 regularity
+            - 'l2_2' : Lambda2,2 : 4 point formula, C2 regularity
+            - 'm6prime' : = 'l4_2'
+            - 'l4_2' : Lambda4,2 : (=M'6) 6 point formula, C2 regularity
+            - 'l4_3' : Lambda4,3 : 6 point formula, C3 regularity
+            - 'l4_4' : Lambda4,4 : 6 point formula, C4 regularity
+            - 'l6_3' : Lambda6,3 : 8 point formula, C3 regularity
+            - 'l6_4' : Lambda6,4 : 8 point formula, C4 regularity
+            - 'l6_5' : Lambda6,5 : 8 point formula, C5 regularity
+            - 'l6_6' : Lambda6,6 : 8 point formula, C6 regularity
+            - 'l8_4' : Lambda8,4 : 10 point formula, C4 regularity
+            - 'm8prime' : M8prime formula
+        """
+        # --- Advection solver from SCALES ---
+        if self._isSCALES:
+            if not self._dim == 3:
+                raise ValueError("Scales Advection not implemented in 2D.")
+            # - Scales imports -
+            from parmepy.f2py import scales2py as scales
+
+            # - Extract order form self.method (default p_M6) -
+            order = None
+            for o in ['p_O2', 'p_O4', 'p_L2',
+                      'p_M4', 'p_M6', 'p_M8',
+                      'p_44', 'p_64', 'p_66', 'p_84']:
+                if self.method[Scales].find(o) >= 0:
+                    order = o
+            if order is None:
+                print ('Unknown advection method, falling back to default (p_M6).')
+                order = 'p_M6'
+            # - Extract splitting form self.method (default strang) -
+            splitting = 'strang'
+            for s in ['classic', 'strang', 'particle']:
+                if self.method[Splitting].find(s) >= 0:
+                    splitting = s
+
+            # - Create the topologies (get param from scales) -
+            # Scales nbcells equals resolutions - 1
+            nbcells = npw.asintarray(self.resolutions[self.advectedFields[0]])
+            nbcells -= 1
+            if self._fromTopo:
+                main_size = self.topology.size
+                comm = self.topology.comm
+                topodims = self.topology.shape
+            elif self._comm is not None:
+                main_size = self._comm.Get_size()
+                comm = self._comm
+                topodims = [1, 1, main_size]
+            else:
+                from parmepy.mpi.main_var import main_size
+                from parmepy.mpi.main_var import main_comm as comm
+                topodims = [1, 1, main_size]
+            scalesres, scalesoffset = \
+                scales.init_advection_solver(nbcells,
+                                             self.domain.length,
+                                             topodims, comm.py2f(),
+                                             order=order,
+                                             dim_split=splitting)
+            msg = 'Scales Advection not yet implemented with ghost points.'
+            # Use same topodims as scales to create Cartesian topology
+            # in order to discretize our fields
+            # Case 1 : topology provided by user at init
+            # and one topo for all fields
+            if self._fromTopo:
+                for v in self.variables:
+                    topo = self.topologies[v]
+                    assert (topo.shape == topodims).all(),\
+                        'input topology is not scales compliant.'
+                    assert not (topo.ghosts > 0).any(), msg
+                    self.discreteFields[v] = v.discretize(topo)
+
+            # Case 2 : a dict of resolutions 
+            else:
+                if self.ghosts is not None:
+                    assert not (self.ghosts > 0).any(), msg
+                if self._singleTopo:
+                    topo = self.domain.getOrCreateTopology(
+                        self._dim, self.resolutions[self.velocity],
+                        topodims, precomputed=True, offset=scalesoffset,
+                        localres=scalesres, ghosts=self.ghosts,
+                        comm=self._comm)
+                    for v in self.variables:
+                        self.discreteFields[v] = v.discretize(topo)
+                else:
+                    for v in self.variables:
+                        topo = \
+                            self.domain.getOrCreateTopology(
+                                self._dim, self.resolutions[v], topodims,
+                                precomputed=True, offset=scalesoffset,
+                                localres=scalesres, ghosts=self.ghosts,
+                                comm=self._comm)
+                        # ... and the corresponding discrete field
+                        self.discreteFields[v] = v.discretize(topo)
+
+            if self._isMultiScale:
+                self.config['isMultiscale'] = self._isMultiScale
+                v_shape = np.asarray(self.resolutions[self.velocity],
+                                     dtype=PARMES_INDEX) - 1
+                scales.init_multiscale(v_shape[0], v_shape[1], v_shape[2],
+                                       self.method[MultiScale])
+            self._my_setup = self.setup_Scales
+
+        # --- GPU or pure-python advection ---
+        else:
+            for i in xrange(self._dim):
+                self.advecDir[i].discretize()
+            self.discreteFields = self.advecDir[0].discreteFields
+            self._my_setup = self.setup_Python
+
+    @staticmethod
+    def getWorkLengths(method=None, domain_dim=None):
+        """
+        Return the length of working arrays lists required
+        for the advection discrete operator, depending on:
+        - the time integrator (RK2, ...)
+        - the interpolation (which depends on domain dimension)
+        - the remeshing (which depends on domain dimension)
+        @param method : the dict of parameters for the operator.
+        Default = parmepy.default_methods.ADVECTION
+        """
+        if method is None:
+            method = default.ADVECTION
+        assert Interpolation in method,\
+            'An interpolation is required for the advection method.'
+        assert TimeIntegrator in method,\
+            'A time integrator is required for the advection method.'
+        assert Remesh in method,\
+            'A remesh is required for the advection method.'
+        tw = method[TimeIntegrator].getWorkLengths(1)
+        iw, iiw = method[Interpolation].getWorkLengths(domain_dim=domain_dim)
+        rw, riw = method[Remesh].getWorkLengths(domain_dim=domain_dim)
+        return max(tw + iw, rw), max(iiw, riw)
+
+    def setWorks(self, rwork=None, iwork=None):
+        if rwork is None:
+            rwork = []
+        if iwork is None:
+            iwork = []
+        if not self._isSCALES:
+            for i in xrange(self._dim):
+                self.advecDir[i].setWorks(rwork, iwork)
+
+    def setup(self):
+        # Check resolutions to set multiscale case, if required.
+        self._isMultiScale = False
+        v_resol = self.variables[self.velocity]
+        if v_resol != self.variables[self.advectedFields[0]]:
+            self._isMultiScale = True
+        if self._isMultiScale and not MultiScale in self.method.keys():
+            print ("Using default mutiscale interpolation : L2_1")
+            self.method[MultiScale] = L2_1
+
+        if not self._is_uptodate:
+            self._my_setup()
+
+    def setup_Scales(self):
+        advectedDiscreteFields = [self.discreteFields[f]
+                                  for f in self.advectedFields]
+        # - Create the discreteOperator from the
+        # list of discrete fields -
+        from parmepy.operator.discrete.scales_advection import \
+            ScalesAdvection
+        self.discreteOperator = ScalesAdvection(
+            self.discreteFields[self.velocity],
+            advectedDiscreteFields, method=self.method,
+            **self.config)
+
+        # -- Final set up --
+        self.discreteOperator.setup()
+        self._is_uptodate = True
+
+    def setup_Python(self):
+        for i in xrange(self._dim):
+            self.advecDir[i].setup()
+        # If topologies differs between directions,
+        # one need Redistribute
+        # operators
+        main_size = self.advecDir[0].discreteFields[
+            self.velocity].topology.size
+        if main_size > 1:
+            # Build bridges
+            self.bridges = self._dim * [None]
+            if main_size > 1:
+                for dfrom in xrange(self.domain.dimension):
+                    self.bridges[dfrom] = self._dim * [None]
+                    for dto in xrange(self._dim):
+                        if dfrom == dto:
+                            self.bridges[dfrom][dto] = None
+                        else:
+                            nsuffix = str(dfrom) + '_' + str(dto)
+                            self.bridges[dfrom][dto] = Redistribute(
+                                variables=self.advectedFields,
+                                opFrom=self.advecDir[dfrom],
+                                opTo=self.advecDir[dto],
+                                name_suffix=nsuffix)
+                            self.bridges[dfrom][dto].setup()
+
+        # Splitting configuration
+        if self.method[Splitting] == 'o2_FullHalf':
+            ## Half timestep in all directions
+            [self.splitting.append((i, 0.5))
+             for i in xrange(self._dim)]
+            [self.splitting.append((self._dim - 1 - i, 0.5))
+             for i in xrange(self._dim)]
+        elif self.method[Splitting] == 'o1':
+            [self.splitting.append((i, 1.)) for i in xrange(self._dim)]
+        elif self.method[Splitting] == 'x_only':
+            self.splitting.append((0, 1.))
+        elif self.method[Splitting] == 'y_only':
+            self.splitting.append((1, 1.))
+        elif self.method[Splitting] == 'z_only':
+            self.splitting.append((2, 1.))
+        elif self.method[Splitting] == 'o2':
+            ## Half timestep in all directions but last
+            [self.splitting.append((i, 0.5))
+             for i in xrange(self._dim - 1)]
+            self.splitting.append((self._dim - 1, 1.))
+            [self.splitting.append((self._dim - 2 - i, 0.5))
+             for i in xrange(self._dim - 1)]
+        else:
+            raise ValueError('Unknown splitting configuration:' +
+                             self.method[Splitting])
+
+        if self.method[Support].find('gpu') >= 0:
+            splitting_nbSteps = len(self.splitting)
+            for d in xrange(self.domain.dimension):
+                dOp = self.advecDir[d].discreteOperator
+                assert len(dOp.exec_list) == splitting_nbSteps, \
+                    "Discrete operator execution " + \
+                    "list and splitting steps sizes must be equal " + \
+                    str(len(dOp.exec_list)) + " != " + \
+                    str(splitting_nbSteps)
+
+        if main_size > 1:
+            self.apply = self._apply_Comm
+        else:
+            self.apply = self._apply_noComm
+        self._is_uptodate = True
+
+        if self.method[Support].find('gpu') >= 0:
+            s = ""
+            device_id = self.advecDir[d].discreteOperator.cl_env._device_id
+            gpu_comm = self.advecDir[d].discreteOperator.cl_env.gpu_comm
+            gpu_rank = gpu_comm.Get_rank()
+            if gpu_rank == 0:
+                s += "=== OpenCL buffers allocated"
+                s += " on Device:{0} ===\n".format(device_id)
+                s += "Global memory used:\n"
+            total_gmem = 0
+            for d in xrange(self.domain.dimension):
+                g_mem_d = 0
+                for df in self.advecDir[d].discreteOperator.variables:
+                    if not df.gpu_allocated:
+                        df.allocate()
+                        g_mem_df = gpu_comm.allreduce(df.mem_size)
+                        g_mem_d += g_mem_df
+                if gpu_rank == 0:
+                    s += " Advection" + S_DIR[d] + ": {0:9d}".format(g_mem_d)
+                    s += "Bytes ({0:5d} MB)\n".format(g_mem_d / (1024 ** 2))
+                total_gmem += g_mem_d
+            if gpu_rank == 0:
+                s += " Total      : {0:9d}".format(total_gmem)
+                s += "Bytes ({0:5d} MB)\n".format(total_gmem / (1024 ** 2))
+                s += "Local memory used:\n"
+            total_lmem = 0
+            for d in xrange(self.domain.dimension):
+                l_mem_d = gpu_comm.allreduce(
+                    self.advecDir[d].discreteOperator.size_local_alloc)
+                if gpu_rank == 0:
+                    s += " Advection" + S_DIR[d] + ": {0:9d}".format(l_mem_d)
+                    s += "Bytes ({0:5d} MB)\n".format(l_mem_d / (1024 ** 2))
+                total_lmem += l_mem_d
+            if gpu_rank == 0:
+                s += " Total      : {0:9d}".format(total_lmem) + "Bytes"
+                print (s)
+
+    @debug
+    def _apply_noComm(self, simulation=None):
+        """
+        Apply this operator to its variables.
+        @param simulation : object that describes the simulation
+        parameters (time, time step, iteration number ...), see
+        parmepy.problem.simulation.Simulation for details.
+
+        Redefinition for advection. Applying a dimensional splitting.
+        """
+        for req in self.requirements:
+            req.wait()
+        for split_id, split in enumerate(self.splitting):
+            self.advecDir[split[0]].apply(
+                simulation, split[1], split_id, self._old_dir)
+            self._old_dir = split[0]
+
+    @debug
+    def _apply_Comm(self, simulation=None):
+        """
+        Apply this operator to its variables.
+        @param simulation : object that describes the simulation
+        parameters (time, time step, iteration number ...), see
+        parmepy.problem.simulation.Simulation for details.
+
+        Redefinition for advection. Applying a dimensional splitting.
+        """
+        for req in self.requirements:
+            req.wait()
+        for split_id, split in enumerate(self.splitting):
+            # Calling the redistribute operators between directions
+            if not self._old_dir == split[0]:
+                self.bridges[self._old_dir][split[0]].apply()
+                self.bridges[self._old_dir][split[0]].wait()
+            self.advecDir[split[0]].apply(
+                simulation, split[1], split_id, self._old_dir)
+            self._old_dir = split[0]
+
+    @debug
+    def finalize(self):
+        """
+        Memory cleaning.
+        """
+        if self._isSCALES:
+            Computational.finalize(self)
+        else:
+            for dop in self.advecDir:
+                dop.finalize()
+                self.timer = self.timer + dop.timer
+
+    def __str__(self):
+        """
+        Common printings for operators
+        """
+        shortName = str(self.__class__).rpartition('.')[-1][0:-2]
+        if self._isSCALES:
+            return super(Advection, self).__str__()
+        s = ''
+        for i in xrange(self._dim):
+            if self.advecDir[i].discreteOperator is not None:
+                s += str(self.advecDir[i].discreteOperator)
+            else:
+                s += shortName + " operator. Not discretised."
+        return s + "\n"
diff --git a/HySoP/hysop/operator/analytic.py b/HySoP/hysop/operator/analytic.py
index f1b6e968fb0b2117a3215d9c2688cb0922ddef37..d8f0585f0b7aa9a86805cedf5d03d20926ff53af 100644
--- a/HySoP/hysop/operator/analytic.py
+++ b/HySoP/hysop/operator/analytic.py
@@ -1,44 +1,41 @@
 """
 @file operator/analytic.py
-
-Analytic operator representation.
+Initialize fields on a grid with a user-defined function.
 """
 from parmepy.constants import debug
-from parmepy.operator.continuous import Operator
+from parmepy.operator.continuous import opsetup, opapply
+from parmepy.operator.computational import Computational
+from parmepy.methods_keys import Support
 
 
-class Analytic(Operator):
+class Analytic(Computational):
     """
     Applies an analytic formula, given by user, on its fields.
     """
 
     @debug
-    ## def __init__(self, variables, resolutions=None, formula=None,
-    ##              ghosts=None, topo=None, doVectorize=False, method=None,
-    ##              task_id=None, comm=None):
     def __init__(self, formula=None, doVectorize=False, **kwds):
         """
-        Create an operator using an analytic formula to compute field(s).
-        @param variables : list of fields on which this operator will apply.
+        Operator to apply a user-defined formula onto a list of fields.
         @param formula : the formula to be applied
-        @param resolutions : list of resolutions (one per variable)
-        @param topo : a predefined topology to discretize variables
-        @param ghosts : number of points in the ghost layer
-        @param method : method used (Used to specify GPU support)
-
-        @remark : method seems useless but it is useful for GPU.
+        @param doVectorize : true if formula must be vectorized (numpy),
+        default = false.
         """
         super(Analytic, self).__init__(**kwds)
+        isGPU = False
+        if kwds.get('method') is not None and Support in kwds['method']:
+            isGPU = kwds['method'][Support].find('gpu') >= 0
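+        # e.g. method={Support: 'gpu'} (any value containing the
+        # substring 'gpu') --> isGPU is True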
         if formula is not None:
             ## A formula applied to all variables of this operator
             self.formula = formula
             for v in self.variables:
                 v.setFormula(formula, doVectorize)
             self.doVectorize = doVectorize
-        else:
-            assert self.variables[0].formula is not None
-            self.formula = self.variables[0].formula
-            self.doVectorize = self.variables[0].doVectorize
+        elif not isGPU:
+            vref = self.variables.keys()[0]
+            assert vref.formula is not None
+            self.formula = vref.formula
+            self.doVectorize = vref.doVectorize
             # Only one formula allowed per operator
             for v in self.variables:
                 assert v.formula is self.formula
@@ -47,23 +44,19 @@ class Analytic(Operator):
 
     def discretize(self):
         super(Analytic, self)._standard_discretize()
-        
-    def setUp(self):
-        self._isUpToDate = True
+
+    @opsetup
+    def setup(self, rwork=None, iwork=None):
+        self._is_uptodate = True
 
     @debug
+    @opapply
     def apply(self, simulation=None):
         assert simulation is not None, \
             "Missing simulation value for computation."
-        # Calling for requirements completion
-        for red in self.requirements:
-            red.wait()
         for v in self.variables:
             topo = self.discreteFields[v].topology
-            v.initialize(currentTime=simulation.time, topo=topo)
+            v.initialize(time=simulation.time, topo=topo)
 
-    def __str__(self):
-        s = super(Analytic, self).__str__()
-        if self.discreteOperator is None:
-            s += "Formula = " + str(self.formula)
-        return s + "\n"
+    def get_profiling_info(self):
+        pass
diff --git a/HySoP/hysop/operator/baroclinic.py b/HySoP/hysop/operator/baroclinic.py
index 8b775d5f144868797d95af177be0c1395a5127c8..6866105b4dfab2c1c3824e44aed6208ddd987496 100644
--- a/HySoP/hysop/operator/baroclinic.py
+++ b/HySoP/hysop/operator/baroclinic.py
@@ -4,15 +4,16 @@
 
 MultiPhase Rot Grad P
 """
-from parmepy.operator.continuous import Operator
-from parmepy.operator.discrete.baroclinic import Baroclinic_d
+from parmepy.operator.computational import Computational
+from parmepy.operator.discrete.baroclinic import Baroclinic as BD
 from parmepy.methods_keys import SpaceDiscretisation
 from parmepy.numerics.finite_differences import FD_C_4
 from parmepy.constants import debug
 import parmepy.default_methods as default
+from parmepy.operator.continuous import opsetup
 
 
-class Baroclinic(Operator):
+class Baroclinic(Computational):
     """
     Pressure operator representation
     """
@@ -35,6 +36,7 @@ class Baroclinic(Operator):
         @param ghosts : number of ghosts points. Default depends on the method.
         Autom. computed if not set.
         """
+        assert 'variables' not in kwds, 'variables parameter is useless.'
         super(Baroclinic, self).__init__(variables=[velocity,
                                                     vorticity, density],
                                          **kwds)
@@ -56,20 +58,19 @@ class Baroclinic(Operator):
                 baroclinic operator.")
 
         super(Baroclinic, self)._standard_discretize(nbGhosts)
-        
+
     @debug
-    def setUp(self):
+    @opsetup
+    def setup(self, rwork=None, iwork=None):
         """
         Baroclinic operator discretization method.
         Create a discrete Baroclinic operator from given specifications.
         """
-        self.discreteOperator = \
-            Baroclinic_d(self.discreteFields[self.velocity],
-                         self.discreteFields[self.vorticity],
-                         self.discreteFields[self.density],
-                         self.viscosity,
-                         method=self.method)
-
-        self.discreteOperator.setUp()
-        self._isUpToDate = True
+        self.discrete_op = \
+            BD(self.discreteFields[self.velocity],
+               self.discreteFields[self.vorticity],
+               self.discreteFields[self.density],
+               self.viscosity,
+               method=self.method)
 
+        self._is_uptodate = True
diff --git a/HySoP/hysop/operator/compold b/HySoP/hysop/operator/compold
new file mode 100644
index 0000000000000000000000000000000000000000..be60a4304d27bb2111ba7f75923807ce12e124a5
--- /dev/null
+++ b/HySoP/hysop/operator/compold
@@ -0,0 +1,381 @@
+"""
+@file operator/continuous.py
+
+Interface common to all continuous operators.
+"""
+from abc import ABCMeta, abstractmethod
+from parmepy.constants import debug
+from parmepy.operator.continuous import Operator
+import numpy as np
+from parmepy.mpi.topology import Cartesian
+from parmepy.tools.parameters import Discretization
+
+
+class Computational(Operator):
+    """
+    Abstract base class for computational operators.
+
+    An operator is composed of :
+    - a set of continuous variables (at least one)
+    - a method which defines how it is discretized/processed
+    - a discrete operator : object built using the method
+    and the discretized variables.
+
+    To each variable a 'resolution' is associated, used
+    to create a topology and a discrete field.
+    See details in 'real' operators (derived classes) description.
+    """
+    __metaclass__ = ABCMeta
+
+    @debug
+    def __new__(cls, *args, **kw):
+        return object.__new__(cls, *args, **kw)
+
+    @debug
+    @abstractmethod
+    def __init__(self, discretization=None, topologies=None,
+                 resolutions=None, method=None, **kwds):
+        """
+        Build the operator.
+        @param discretization : a parmepy.tools.parameters.Discretization
+        or a parmepy.mpi.topology.Cartesian object used to discretize
+        the variables.
+        @param topologies : a predefined topology (or a dictionary of
+        topologies, one per variable) to discretize variables.
+        @param resolutions : dictionary of resolutions, one per variable.
+        @param method : a dictionary of numerical methods.
+        See methods.py for authorized values.
+        Warning : you cannot set both topologies and resolutions, but you
+        must set one of them, except for redistribute operators.
+        """
+        # Base class init
+        super(Computational, self).__init__(**kwds)
+
+        ## A dictionary of parameters defining the numerical methods
+        ## used to discretize the operator.
+        ## When method is None, each operator must provide a default
+        ## behavior.
+        self.method = method
+
+        ## The discretization of this operator.
+        self.discreteOperator = None
+
+        ## A dictionary of discreteFields associated with this operator
+        ## key = continuous variable \n
+        ## Example : discrete_variable = discreteFields[velocity]
+        ## returns the discrete fields that corresponds to the continuous
+        ## variable velocity.
+        self.discreteFields = {}
+
+        if not self._varsfromList:
+            msg = 'discretization parameter is useless when variables are set'
+            msg += ' from a dict.'
+            assert discretization is None, msg
+
+        self._discretization = discretization
+        # Fill variables dictionary
+        for v in self.variables:
+            self.variables[v] = self._discretization
+        # Remark FP: discretization may be None in two cases:
+        # - not required when variables is set from a dict
+        # - the current task does not need to discretize this operator.
+
+        if discretization is not None:
+            msg = 'discretization parameter must be either a '
+            msg += 'parmepy.tools.parameter.Discretization or '
+            msg += 'a parmepy.mpi.topology.Cartesian object.'
+            assert isinstance(discretization,
+                              (Cartesian, Discretization)), msg
+
+        ## One topology per variable (dictionary, filled below).
+        self.topologies = {}
+        ## True if topologies were given at init,
+        ## false if they must be built from resolutions.
+        self._fromTopo = False
+        ## True if a single topology is shared by all the variables.
+        self._singleTopo = False
+
+        if topologies is not None:
+            self._fromTopo = True
+            # Case 1, with a single topo
+            if isinstance(topologies, Cartesian):
+                self._singleTopo = True
+                for v in self.variables:
+                    self.topologies[v] = topologies
+            # Case 1, with one topo per variable
+            elif isinstance(topologies, dict):
+                self.topologies = topologies
+            else:
+                msg = 'Topo attribute must be either a dictionary'
+                msg += ' or a parmepy.mpi.topology.Cartesian object.'
+                raise AttributeError(msg)
+
+            # Build resolutions dictionary and check topologies
+            self.resolutions = {}
+            for v in self.variables:
+                topo = self.topologies[v]
+                assert isinstance(topo, Cartesian)
+                self.resolutions[v] = topo.globalMeshResolution
+
+        elif resolutions is not None:
+            msg = 'You cannot give both topologies and resolutions arguments.'
+            assert topologies is None, msg
+            self._fromTopo = False
+            # Case 2, with a single resolution
+            if not isinstance(resolutions, dict):
+                self._singleTopo = True
+                self.resolutions = {}
+                for v in self.variables:
+                    self.resolutions[v] = resolutions
+            else:
+                # Case 2, with one resolution per variable
+                self.resolutions = resolutions
+                # if user has given a dict but with one
+                # single resolution for all vars ...
+                ref = self.resolutions.values()[0]
+                self._singleTopo = False
+                cond = [a for a in self.resolutions.values()
+                        if list(a) != list(ref)]
+                if len(cond) == 0:
+                    self._singleTopo = True
+
+    @staticmethod
+    def getWorkLengths(method=None, domain_dim=None):
+        """
+        Return the length of working arrays lists required
+        for the discrete operator.
+        """
+        assert method is None
+        assert domain_dim is None
+        return 0, 0
+
+    def setWorks(self, rwork=None, iwork=None):
+        if rwork is None:
+            rwork = []
+        if iwork is None:
+            iwork = []
+        self.discreteOperator.setWorks(rwork, iwork)
+
+    def _setGhosts(self, min_ghosts):
+        """
+        Set/check ghost layer according to input min_ghosts
+        """
+        # redim self.ghosts if required
+        if self.ghosts is not None:
+            self.ghosts[self.ghosts < min_ghosts] = min_ghosts
+        else:
+            self.ghosts = np.ones((self.domain.dimension)) * min_ghosts
+
+    @abstractmethod
+    def discretize(self):
+        """
+        For each variable, check if a proper topology has been defined,
+        if not, build one according to 'discretization' parameters set
+        during initialization of the class.
+        Then, discretize each variable on this topology.
+        """
+
+    def _discretize_vars(self):
+        """
+        Discretize all variables of the current operator.
+        """
+        for v in self.variables:
+            msg = 'Missing topology to discretize ' + v.name
+            msg += ' in operator ' + self.name
+            assert isinstance(self.variables[v], Cartesian), msg
+
+            self.discreteFields[v] = v.discretize(self.variables[v])
+
+    def _checkVariables(self):
+        if self._varsfromList:
+            # In that case, single_topo is True
+            # but we need to check if discretization param
+            # was a topology or a Discretization.
+            msg = 'required parameter discretization has not been'
+            msg += ' set during operator construction.'
+            assert self._discretization is not None, msg
+            single_topo = True
+            if isinstance(self._discretization, Cartesian):
+                # No need to build topologies
+                build_topos = False
+            elif isinstance(self._discretization, Discretization):
+                build_topos = True
+            else:
+                msg = 'Wrong type for parameter discretization in'
+                msg += ' operator construction.'
+                raise ValueError(msg)
+        else:
+            msg = 'discretization parameter in operator construction is '
+            msg += 'useless when variables are set from a dict.'
+            assert self._discretization is None, msg
+            single_topo = False
+            build_topos = {}
+            for v in self.variables:
+                disc = self.variables[v]
+                if isinstance(disc, Cartesian):
+                    build_topos[v] = False
+                elif isinstance(disc, Discretization):
+                    build_topos[v] = True
+                else:
+                    msg = 'Wrong type for values in variables dictionary '
+                    msg += '(parameter in operator construction).'
+                    raise ValueError(msg)
+
+            ref = self.variables.values()[0]
+            single_topo = True
+            for disc in self.variables.values():
+                single_topo = ref == disc and single_topo
+
+            if single_topo:
+                build_topos = build_topos.values()[0]
+
+        return single_topo, build_topos
+
+    def _standard_discretize(self, min_ghosts=0):
+        """
+        This function provides a standard way to discretize the operator,
+        but some operators may need a specific discretization process.
+        """
+        # One topo for all fields ...
+        if self._singleTopo:
+            self._discretize_single_topo(min_ghosts)
+        else:
+            # ... or one topo for each field.
+            self._discretize_multi_topo(min_ghosts)
+
+    def _discretize_single_topo(self, min_ghosts=0):
+        """
+        Discretization of all fields with the same topo,
+        predefined during initialization.
+        """
+        # Two cases:
+        # 1 - topo set during initialization
+        # 2 - (Resolution, Ghosts) set during init --> need to build a topo.
+        if self._discretization is None:
+            topo = self.topologies.values()[0]
+            assert (topo.ghosts >= min_ghosts).all()
+        else:
+            self._setGhosts(min_ghosts)
+            topo = self.domain.create_topology(self._discretization,
+                                               self.resolutions.values()[0],
+                                               ghosts=self.ghosts,
+                                               comm=self._comm)
+        self._discretize_vars()
+
+    def _discretize_multi_topo(self, min_ghosts=0):
+        """
+        A topo is build for each field, standard default mpi distribution,
+        with global mesh resolution given at init.
+        """
+        if self._fromTopo:
+            for v in self.variables:
+                topo = self.topologies[v]
+
+                assert (topo.ghosts >= min_ghosts).all()
+                self.discreteFields[v] = v.discretize(topo)
+        else:
+            self._setGhosts(min_ghosts)
+            for v in self.variables:
+                topo = self.domain.getOrCreateTopology(self.domain.dimension,
+                                                       self.resolutions[v],
+                                                       ghosts=self.ghosts,
+                                                       comm=self._comm)
+                self.topologies[v] = topo
+                self.discreteFields[v] = v.discretize(topo)
+
+    def _discretize_fftw(self, resolution):
+        """
+        fftw specific way to discretize variables for a given
+        'reference' resolution.
+        We assume that in fft case, only one topology must be used
+        for all variables.
+        """
+        # Get main communicator
+        if self._fromTopo:
+            comm = self.topologies[self.vorticity].comm
+            commsize = self.topologies[self.vorticity].size
+        elif self._comm is not None:
+            comm = self._comm
+            commsize = self._comm.Get_size()
+        else:
+            from parmepy.mpi.main_var import main_comm as comm
+            from parmepy.mpi.main_var import main_size as commsize
+        from parmepy.f2py import fftw2py
+        localres, localoffset = fftw2py.init_fftw_solver(
+            resolution, self.domain.length, comm=comm.py2f())
+
+        topodims = np.ones(self.domain.dimension)
+        topodims[-1] = commsize
+
+        # Case 1 : topology provided by user at init:
+        if self._fromTopo:
+            topo = self.topologies.values()[0]
+            # Check if input topo is complient with fftw topo
+            assert (topo.shape == topodims).all(), 'input topology is\
+                not compliant with fftw.'
+            self._discretize_single_topo()
+        # Case 2 : input = resolution + [ghosts, comm]
+        else:
+            topo = self.domain.getOrCreateTopology(self.domain.dimension,
+                                                   resolution, topodims,
+                                                   precomputed=True,
+                                                   offset=localoffset,
+                                                   localres=localres,
+                                                   ghosts=self.ghosts,
+                                                   comm=self._comm)
+            for v in self.variables:
+                self.discreteFields[v] = v.discretize(topo)
+
+    @abstractmethod
+    def setUp(self):
+        """
+        Last step of initialization. After this, the operator must be
+        ready for apply call.
+
+        Main step : setup for discrete operators.
+        """
+
+    @debug
+    def finalize(self):
+        """
+        Memory cleaning.
+        """
+        if self.discreteOperator is not None:
+            self.discreteOperator.finalize()
+            self.timer = self.timer + self.discreteOperator.timer
+
+    @debug
+    def apply(self, simulation=None):
+        """
+        Apply this operator to its variables.
+        @param simulation : object that describes the simulation
+        parameters (time, time step, iteration number ...), see
+        parmepy.problem.simulation.Simulation for details.
+        """
+        for req in self.requirements:
+            req.wait()
+        self.discreteOperator.apply(simulation)
+
+    def printComputeTime(self):
+        """ Time monitoring."""
+        if self.discreteOperator is not None:
+            self.discreteOperator.printComputeTime()
+            self.time_info = self.discreteOperator.time_info
+        else:
+            from parmepy.mpi.main_var import main_rank
+            shortName = str(self.__class__).rpartition('.')[-1][0:-2]
+            s = '[' + str(main_rank) + '] ' + shortName
+            s += " : operator not discretized --> no computation, time = 0."
+            print s
+
+    def updateGhosts(self):
+        """
+        Update ghost points values, if any.
+        """
+        self.discreteOperator.updateGhosts()
+
+    def __str__(self):
+        """
+        Common printings for operators
+        """
+        shortName = str(self.__class__).rpartition('.')[-1][0:-2]
+        if self.discreteOperator is not None:
+            s = str(self.discreteOperator)
+        else:
+            s = shortName + " operator. Not discretised."
+        return s + "\n"
diff --git a/HySoP/hysop/operator/computational.py b/HySoP/hysop/operator/computational.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0eaab9efb3344baf7e5f46abbf87a7257664863
--- /dev/null
+++ b/HySoP/hysop/operator/computational.py
@@ -0,0 +1,333 @@
+"""
+@file operator/computational.py
+
+Interface common to all computational operators.
+"""
+from abc import ABCMeta, abstractmethod
+from parmepy.constants import debug
+from parmepy.operator.continuous import Operator
+from parmepy.mpi.topology import Cartesian
+from parmepy.tools.parameters import Discretization
+
+
+class Computational(Operator):
+    """
+    Abstract base class for computational operators.
+
+    An operator is composed of :
+    - a set of continuous variables (at least one)
+    - a method which defines how it is discretized/processed
+    - a discrete operator : object built using the method
+    and the discretized variables.
+
+    To each variable a 'resolution' is associated, used
+    to create a topology and a discrete field.
+    See details in 'real' operators (derived classes) description.
+    """
+    __metaclass__ = ABCMeta
+
+    @debug
+    def __new__(cls, *args, **kw):
+        return object.__new__(cls, *args, **kw)
+
+    @debug
+    @abstractmethod
+    def __init__(self, discretization=None, method=None, **kwds):
+        """
+        Build the operator.
+        The only required parameter is a list of variables.
+        @param variables : list of fields on which this operator will apply.
+        @param method : a dictionnary of methods.
+        See methods.py for authorized values.
+        the operator.
+        @param name : operator id.
+        @param topo : a predefined topology to discretize variables
+        @param ghosts : number of points in the ghost layer
+        @param comm : MPI communicator to build topologies
+        Warning : you cannot set both topo and ghosts.
+        @param resolutions : dictionnary of resolutions, one for each variable.
+        Warning : you cannot set both topo and resolutions but you must set
+        one of them, except for redistribute operators.
+        """
+        # Base class init
+        super(Computational, self).__init__(**kwds)
+
+        ## A dictionary of parameters defining the numerical methods
+        ## used to discretize the operator.
+        ## When method is None, each operator must provide a default
+        ## behavior.
+        self.method = method
+
+        ## The discretization of this operator.
+        self.discreteOperator = None
+
+        ## A dictionary of discreteFields associated with this operator
+        ## key = continuous variable \n
+        ## Example : discrete_variable = discreteFields[velocity]
+        ## returns the discrete fields that corresponds to the continuous
+        ## variable velocity.
+        self.discreteFields = {}
+
+        if not self._varsfromList:
+            msg = 'discretization parameter is useless when variables are set'
+            msg += ' from a dict.'
+            assert discretization is None, msg
+
+        self._discretization = discretization
+        # Remark FP: discretization may be None in two cases:
+        # - not required when variables is set from a dict
+        # - the current task does not need to discretize this operator.
+
+        # False if fields have different discretizations.
+        # Set during discretize call.
+        self._single_topo = True
+
+        # If true, ready for setup ...
+        # Turns to true after self._discretize_vars call.
+        self._is_discretized = False
+
+    def get_work_properties(self):
+        """
+        Return the shapes of the work arrays required
+        by the discrete operator.
+        @return shapes, a dict of array shapes:
+        shapes['rwork'] == list of shapes for real arrays,
+        shapes['iwork'] == list of shapes for int arrays.
+        len(shapes['...']) gives the number of required arrays.
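+
+        Example (illustrative values):
+        shapes = op.get_work_properties()
+        # shapes == {'rwork': [(12, 12), (45, 12, 33)], 'iwork': None}
+        # --> two real work arrays required, no integer work array.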
+        """
+        return {'rwork': None, 'iwork': None}
+
+    def discretize(self):
+        """
+        For each variable, check if a proper topology has been defined,
+        if not, build one according to 'discretization' parameters set
+        during initialization of the class.
+        Then, discretize each variable on this topology.
+        """
+        self._standard_discretize()
+
+    def _discretize_vars(self):
+        """
+        Discretize all variables of the current operator.
+        """
+        for v in self.variables:
+            msg = 'Missing topology to discretize ' + v.name
+            msg += ' in operator ' + self.name
+            assert isinstance(self.variables[v], Cartesian), msg
+
+            self.discreteFields[v] = v.discretize(self.variables[v])
+        self._is_discretized = True
+
+    def _check_variables(self):
+        """
+        Check variables and discretization parameters
+        Set single_topo: if true all fields are discretized with the
+        same topo
+        @return build_topos : a dict (key == field), if build_topos[v] is true,
+        a topology must be built for v. In that case, the discretization has
+        been saved in self.variables[v] during init. In the other case
+        self.variables[v] is the required topology.
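+
+        Example (illustrative) : for variables set from a list with
+        discretization=Discretization(...), build_topos is the single
+        boolean True; for variables given as {v: topo_v, w: topo_w},
+        build_topos is {v: False, w: False}.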
+
+        Remark : since an operator belongs to one and only one task, this
+        function is not executed by every mpi process, and therefore
+        can not be called at init.
+        """
+        if self._varsfromList:
+            # In that case, self._single_topo is True
+            # but we need to check if discretization param
+            # was a topology or a Discretization.
+            msg = 'required parameter discretization has not been'
+            msg += ' set during operator construction.'
+            assert self._discretization is not None, msg
+            # Fill variables dictionary
+            for v in self.variables:
+                self.variables[v] = self._discretization
+            self._single_topo = True
+            if isinstance(self._discretization, Cartesian):
+                # No need to build topologies
+                build_topos = False
+            elif isinstance(self._discretization, Discretization):
+                build_topos = True
+            else:
+                msg = 'Wrong type for parameter discretization in'
+                msg += ' operator construction.'
+                raise ValueError(msg)
+        else:
+            msg = 'discretization parameter in operator construction is '
+            msg += 'useless when variables are set from a dict.'
+            assert self._discretization is None, msg
+            self._single_topo = False
+            build_topos = {}
+            for v in self.variables:
+                disc = self.variables[v]
+                if isinstance(disc, Cartesian):
+                    build_topos[v] = False
+                elif isinstance(disc, Discretization):
+                    build_topos[v] = True
+                else:
+                    msg = 'Wrong type for values in variables dictionary '
+                    msg += '(parameter in operator construction).'
+                    raise ValueError(msg)
+
+            ref = self.variables.values()[0]
+            self._single_topo = True
+            for disc in self.variables.values():
+                self._single_topo = ref == disc and self._single_topo
+
+            if self._single_topo:
+                build_topos = build_topos.values()[0]
+                self._discretization = self.variables.values()[0]
+
+        return build_topos
+
+    def _standard_discretize(self, min_ghosts=0):
+        """
+        This function provides a standard way to discretize the operator,
+        but some operators may need a specific discretization process.
+        """
+        build_topos = self._check_variables()
+        if self._single_topo:
+            # One topo for all fields ...
+            if build_topos:
+                topo = self._build_topo(self._discretization, min_ghosts)
+                for v in self.variables:
+                    self.variables[v] = topo
+            else:
+                # Topo is already built, just check its ghosts
+                ghosts = self.variables.values()[0].mesh.discretization.ghosts
+                assert (ghosts >= min_ghosts).all()
+
+        else:
+            # ... or one topo for each field.
+            for v in self.variables:
+                if build_topos[v]:
+                    self.variables[v] = self._build_topo(self.variables[v],
+                                                         min_ghosts)
+                else:
+                    assert (self.variables[v].ghosts >= min_ghosts).all()
+
+        # All topos are built, we can discretize fields.
+        self._discretize_vars()
+
+    def _build_topo(self, discretization, min_ghosts):
+        # Reset ghosts if necessary
+        ghosts = discretization.ghosts
+        ghosts[ghosts < min_ghosts] = min_ghosts
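+        # note : 'ghosts' is assumed to be the same numpy array object
+        # as discretization.ghosts, so the in-place update above also
+        # modifies the input discretization.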
+        # build a topology from the given discretization
+        return self.domain.create_topology(discretization)
+
+    def _fftw_discretize(self):
+        """
+        fftw specific way to discretize variables for a given
+        'reference' resolution.
+        We assume that in fft case, only one topology must be used
+        for all variables.
+        """
+        build_topos = self._check_variables()
+        assert self._single_topo, 'All fields must use the same topology.'
+        # Get local mesh parameters from fftw
+        comm = self._mpis.comm
+        from parmepy.f2py import fftw2py
+
+        if build_topos:
+            # In that case, self._discretization must be
+            # a Discretization object, used for all fields.
+            # We use it to initialize the fftw solver
+            msg = 'Wrong type for parameter discretization (at init).'
+            assert isinstance(self._discretization, Discretization), msg
+            resolution = self._discretization.resolution
+            localres, global_start = fftw2py.init_fftw_solver(
+                resolution, self.domain.length, comm=comm.py2f())
+            # Create the parmes topo (plane, cut through ZDIR)
+            topo = self.domain.create_plane_topology_from_mesh(
+                global_start=global_start, localres=localres,
+                discretization=self._discretization)
+            for v in self.variables:
+                self.variables[v] = topo
+        else:
+            # In that case, self._discretization must be
+            # a Cartesian object, used for all fields.
+            # We use it to initialize fftw solver
+            assert isinstance(self._discretization, Cartesian)
+            topo = self._discretization
+            msg = 'input topology is not compliant with fftw.'
+            assert topo.dimension == 1, msg
+
+            from parmepy.constants import ORDER
+            if ORDER == 'C':
+                assert topo.shape[0] == self._mpis.comm.Get_size(), msg
+            else:
+                assert topo.shape[-1] == self._mpis.comm.Get_size(), msg
+
+            resolution = topo.mesh.discretization.resolution
+
+            localres, global_start = fftw2py.init_fftw_solver(
+                resolution, self.domain.length, comm=comm.py2f())
+
+        assert (topo.mesh.resolution == localres).all()
+        assert (topo.mesh.global_start == global_start).all()
+        msg = 'Ghosts points not yet implemented for fftw-type operators.'
+        assert (topo.ghosts() == 0).all(), msg
+
+        # All topos are built, we can discretize fields.
+        self._discretize_vars()
+
+    @abstractmethod
+    def setup(self, rwork=None, iwork=None):
+        """
+        Last step of initialization. After this, the operator must be
+        ready for apply call.
+
+        Main step : setup for discrete operators.
+        """
+
+    @debug
+    def finalize(self):
+        """
+        Memory cleaning.
+        """
+        if self.discreteOperator is not None:
+            self.discreteOperator.finalize()
+            self.timer = self.timer + self.discreteOperator.timer
+
+    @debug
+    def apply(self, simulation=None):
+        """
+        Apply this operator to its variables.
+        @param simulation : object that describes the simulation
+        parameters (time, time step, iteration number ...), see
+        parmepy.problem.simulation.Simulation for details.
+        """
+        for req in self.requirements:
+            req.wait()
+        assert self._is_discretized
+        self.discreteOperator.apply(simulation)
+
+    def printComputeTime(self):
+        """ Time monitoring."""
+        if self.discreteOperator is not None:
+            self.discreteOperator.printComputeTime()
+            self.time_info = self.discreteOperator.time_info
+        else:
+            from parmepy.mpi.main_var import main_rank
+            shortName = str(self.__class__).rpartition('.')[-1][0:-2]
+            s = '[' + str(main_rank) + '] ' + shortName
+            s += " : operator not discretized --> no computation, time = 0."
+            print s
+
+    def update_ghosts(self):
+        """
+        Update ghost points values, if any.
+        """
+        assert self._is_discretized
+        self.discreteOperator.update_ghosts()
+
+    def __str__(self):
+        """
+        Common printings for operators
+        """
+        shortName = str(self.__class__).rpartition('.')[-1][0:-2]
+        if self.discreteOperator is not None:
+            s = str(self.discreteOperator)
+        else:
+            s = shortName + " operator. Not discretised."
+        return s + "\n"
diff --git a/HySoP/hysop/operator/continuous.py b/HySoP/hysop/operator/continuous.py
index 040194fff985971bb1698ff2246645fd419d1b36..57a70f10d9d44765ac24ff1ae264027b85c7862a 100644
--- a/HySoP/hysop/operator/continuous.py
+++ b/HySoP/hysop/operator/continuous.py
@@ -5,8 +5,9 @@ Interface common to all continuous operators.
 """
 from abc import ABCMeta, abstractmethod
 from parmepy.constants import debug
-import numpy as np
-from parmepy.tools.timers import Timer
+from parmepy.tools.profiler import Profiler
+from parmepy.tools.parameters import MPI_params, IO_params
+import parmepy.tools.io_utils as io
 
 
 class Operator(object):
@@ -14,13 +15,13 @@ class Operator(object):
     Abstract base class for continuous operators.
 
     An operator is composed of :
-    - a set of continuous variables (at least one)
-    - a method which defined how it would be discretized/processed
-    - a discrete operator : object build using the method
-    and the discretized variables.
-
-    To each variable a 'resolution' is associated, used
-    to create a topology and a discrete field.
+    - a dictionary of continuous variables (at least one):
+    self.variables = {v: descr_v, w: descr_w}.
+    The descr type depends on the operator type (computational,
+    redistribute ...) and corresponds to the discretisation info
+    (topology, resolution ...)
+    - a method which may define the numerical
+    method used, the device (GPU/CPU) ...
+    - a task_id : number to fix the MPI task that owns this operator.
     See details in 'real' operators (derived classes) description.
     """
     __metaclass__ = ABCMeta
@@ -31,189 +32,153 @@ class Operator(object):
 
     @debug
     @abstractmethod
-    def __init__(self, variables, method=None, topo=None, ghosts=None,
-                 task_id=None, comm=None, resolutions=None):
+    def __init__(self, variables=None, mpi_params=None,
+                 io_params=None, **kwds):
         """
-        Build the operator.
-        The only required parameter is a list of variables.
-        @param variables : list of fields on which this operator will apply.
-        @param method : a dictionnary of methods.
-        See methods.py for authorized values.
-        the operator.
-        @param name : operator id.
-        @param topo : a predefined topology to discretize variables
-        @param ghosts : number of points in the ghost layer
-        @param comm : MPI communicator to build topologies
-        Warning : you cannot set both topo and ghosts.
-        @param resolutions : dictionnary of resolutions, one for each variable.
-        Warning : you cannot set both topo and resolutions but you must set
-        one of them, except for redistribute operators.
+        @param[in, out] variables : fields on which this operator will apply.
+        May be a list of fields or a dictionary with fields as keys
+        and an associated value which may be:
+        - a topology
+        - a resolution for computational operators
+        @param[in] mpi_params : parmepy.tools.parameters.MPI_params to
+        set the mpi context.
+        @param[in, out] io_params : setup for i/o.
         """
+        # 1 ---- Variables setup ----
         ## List of parmepy.continuous.Fields involved in the operator.
         if isinstance(variables, list):
+            self.variables = {}
+            for v in variables:
+                self.variables[v] = None
+            self._varsfromList = True
+            # Details on the discretization process must be provided
+            # in derived class (extra args like resolution, topo ...)
+        elif isinstance(variables, dict):
+            self._varsfromList = False
             self.variables = variables
+        elif variables is not None:
+            # variables was given, but is neither a list nor a dict.
+            msg = 'Wrong type for variables arg. '
+            msg += 'It must be a list or a dictionary.'
+            raise AttributeError(msg)
         else:
-            self.variables = [variables]
+            # This last case corresponds to redistribute operators,
+            # whose variables may be implicitly defined from their
+            # source and target operators
+            # (see parmepy.operator.redistribute.Redistribute for details).
+            self.variables = {}
+
+        ## Domain of definition.
+        ## Must be the same for all variables
+        ## Set in derived class.
+        self.domain = None
+
+        # mpi context
+        self._mpis = mpi_params
+        ## tools for profiling
+        self.profiler = None
+
+        # Remark : domain, _mpis and profiler will be set properly in
+        # _set_domain_and_tasks, called in derived class, since it may
+        # require some specific initialization (check domain ...)
+
         ## Input variables.
         self.input = []
         ## Output variables.
         self.output = []
-        ## Domain of definition.
-        self.domain = self.variables[0].domain
-        for v in self.variables:
-            assert v.domain is self.domain, 'All variables of the operator\
-                must be defined on the same domain.'
-        ## Object to describe the method of discretization of this operator.
-        self.discreteOperator = None
         ## bool to check if the setup function has been called for
         ## this operator
-        self._isUpToDate = False
-        ## A dictionnary of discreteFields associated with this operator
-        ## key = continuous variable \n
-        ## Example : discrete_variable = discreteFields[velocity]
-        ## returns the discrete fields that corresponds to the continuous
-        ## variable velocity.
-        self.discreteFields = {}
-        ## The method used to discretize the operator.
-        self.method = method
-        ## Number of points in the ghost layer
-        if ghosts is not None:
-            self.ghosts = np.asarray(ghosts)
-            assert self.ghosts.size == self.domain.dimension
-        else:
-            self.ghosts = None
-
-        ## A predefined communicator. May be None
-        self._comm = comm
-        # Check if only one of topo or comm is given by user
-        assert comm is None or topo is None
-
-        # check that either topo or resolutions has been given
-        # not true for redistribute operators ...
-        # Note (JM):  This assertion is not possible in multiscale operators
-        # assert topo is not None or resolutions is not None
-        #if topo is not None and resolutions is not None:
-        #    msg = "You can not set both resolutions and topology arguments."
-        #    raise ValueError(msg)
-        ## Topology used for ALL the variables of the operator
-        self.topology = None
-        if topo is None:
-            ## Grid resolution for each variable (dictionnary)
-            self.resolutions = resolutions
-        elif resolutions is None:
-            # Use the topology for all variables
-            self.topology = topo
-            self.resolutions = {}
-            if self.ghosts is not None:
-                assert (self.ghosts == topo.ghosts).all(), 'topo.ghosts and\
-                input for ghosts must not be different'
-            else:
-                self.ghosts = topo.ghosts
-            for v in self.variables:
-                self.resolutions[v] = self.topology.globalMeshResolution
-        else:
-            # topology and resolution are given. Let the specific operator
-            # mix the two.
-            self.resolutions = resolutions
-            self.topology = topo
-            if self.ghosts is not None:
-                assert (self.ghosts == topo.ghosts).all(), 'topo.ghosts and\
-                input for ghosts must not be different'
+        self._is_uptodate = False
 
         self.name = self.__class__.__name__
-        self.task_id = task_id
-        self.timer = Timer(self)
-        ## Redistribute operator list that we must wait for.
-        self.requirements = []
+        ## List of operators that must be waited for.
+        self._wait_list = []
         ## time monitoring
         self.time_info = None
+        ## Dictionary of optional parameters for output
+        self.io_params = io_params
+        # Object that deals with output file writing.
+        # Optional.
+        self._writer = None
+        self.ontask = False
 
-    @staticmethod
-    def getWorkLengths(method=None, domain_dim=None):
+    def _set_domain_and_tasks(self):
         """
-        Return the length of working arrays lists required
-        for the discrete operator.
+        Initialize the mpi context, depending on local variables, domain
+        and so on.
         """
-        assert method is None
-        assert domain_dim is None
-        return 0, 0
+        # When this function is called, the operator must at least
+        # have one variable.
+        assert len(self.variables) > 0
+        if isinstance(self.variables, list):
+            self.domain = self.variables[0].domain
+        elif isinstance(self.variables, dict):
+            self.domain = self.variables.keys()[0].domain
+
+        # Check if all variables have the same domain
+        for v in self.variables:
+            assert v.domain is self.domain, 'All variables of the operator\
+            must be defined on the same domain.'
+        # Set/check mpi context
+        if self._mpis is None:
+            self._mpis = MPI_params(comm=self.domain.comm_task,
+                                    task_id=self.domain.currentTask())
 
-    def setWorks(self, rwork=None, iwork=None):
-        if rwork is None:
-            rwork = []
-        if iwork is None:
-            iwork = []
-        self.discreteOperator.setWorks(rwork, iwork)
+        # Set profiler
+        self.profiler = Profiler(self, self.domain.comm_task)
 
-    @abstractmethod
-    def discretize(self):
+    def _error_(self):
+        raise RuntimeError("This operator is not defined for the current task")
+
+    def waitFor(self, op):
         """
-        Build (or get) topologies required for this operator
-        and discretize all its variables.
+        @param op : a parmepy operator
+        Add an operator to the 'wait' list of the present object.
+        It means that before any apply of this operator, all
+        (mpi) operations in op must be fulfilled, which implies
+        a call to op.wait().
         """
+        self._wait_list.append(op)
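+        # e.g. op2.waitFor(op1) : op1.wait() will be called at the
+        # beginning of each op2.apply() (see Operator.apply).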
 
-    def _standard_discretize(self, nbGhosts=0):
+    def waitList(self):
         """
-        This functions provides a standard way to discretize the operator,
-        but some operators may need a specific discretization process.
+        @return the list of operators that must be waited for
+        before any attempt to apply the present operator.
         """
-        # One topo for all fields ...
-        if self.topology is not None:
-            self._discretize_single_topo(nbGhosts)
-        else:
-            # ... or one topo for each field.
-            self._discretize_multi_topo(nbGhosts)
+        return self._wait_list
 
-    def _discretize_single_topo(self, nbGhosts=0):
+    def wait(self):
         """
-        Discretization of all fields with the same topo,
-        predefined during initialization.
+        MPI wait for synchronisation: when this function is called,
+        the program waits for the completion of all the running
+        operations of this operator (mpi requests for example).
+        This is a blocking call.
         """
-        assert self.topology is not None
-        assert (self.topology.ghosts >= nbGhosts).all()
-        for v in self.variables:
-            self.discreteFields[v] = v.discretize(self.topology)
+        pass
 
-    def _discretize_multi_topo(self, nbGhosts=0):
+    def testRequests(self):
         """
-        A topo is build for each field, standard default mpi distribution,
-        with global mesh resolution given at init.
+        @return a boolean
+        MPI send/recv test for synchronisation: when this function is called,
+        the program checks whether this operator handles some incomplete
+        mpi requests (if so, return true, else false).
+        This is a non-blocking call.
         """
-        if self.ghosts is not None:
-            self.ghosts[self.ghosts < nbGhosts] = nbGhosts
-        else:
-            self.ghosts = np.ones((self.domain.dimension)) * nbGhosts
-        for v in self.variables:
-            topo = self.domain.getOrCreateTopology(self.domain.dimension,
-                                                   self.resolutions[v],
-                                                   ghosts=self.ghosts,
-                                                   comm=self._comm)
-            self.discreteFields[v] = v.discretize(topo)
-
-    def addRedistributeRequirement(self, red):
-        self.requirements.append(red)
-
-    def getRedistributeRequirement(self):
-        return self.requirements
+        pass
 
     @abstractmethod
-    def setUp(self):
+    def setup(self, rwork=None, iwork=None):
         """
         Last step of initialization. After this, the operator must be
-        ready for apply call.
-
-        Main step : setup for discrete operators.
-        """
-
-    @debug
-    def finalize(self):
-        """
-        Memory cleaning.
+        ready to apply.
+        In derived classes, called through @opsetup decorator.
         """
-        if self.discreteOperator is not None:
-            self.discreteOperator.finalize()
-            self.timer = self.timer + self.discreteOperator.timer
+        if not self.domain.currentTask() == self._mpis.task_id:
+            self.ontask = False
+            self._error_()
 
+    @abstractmethod
     @debug
     def apply(self, simulation=None):
         """
@@ -221,52 +186,126 @@ class Operator(object):
         @param simulation : object that describes the simulation
         parameters (time, time step, iteration number ...), see
         parmepy.problem.simulation.Simulation for details.
+        In derived classes, called through @opapply decorator.
         """
-        for req in self.requirements:
-            req.wait()
-        self.discreteOperator.apply(simulation)
+        for op in self.waitList():
+            op.wait()
 
+    def finalize(self):
+        """
+        Memory cleaning.
+        """
+        # wait for all remaining communications, if any
+        self.wait()
+
+    @abstractmethod
     def printComputeTime(self):
-        """ Time monitoring."""
-        if self.discreteOperator is not None:
-            self.discreteOperator.printComputeTime()
-            self.time_info = self.discreteOperator.time_info
-        else:
-            from parmepy.mpi.main_var import main_rank
-            shortName = str(self.__class__).rpartition('.')[-1][0:-2]
-            s = '[' + str(main_rank) + '] ' + shortName
-            s += " : operator not discretized --> no computation, time = 0."
-            print (s)
+        """
+        Time monitoring.
+        """
 
-    def isUp(self):
+    def is_up(self):
         """
         True if ready to be applied (--> setup function has
         been called succesfully)
         """
-        return self._isUpToDate
+        return self._is_uptodate
 
-    def __str__(self):
+    def _set_io(self, filename, buffshape):
         """
-        Common printings for operators
+        Set internal properties for (optional) file output.
+        This function is private and must not be called by
+        external objects. It is usually called by the operator
+        during construction (__init__).
+        @param filename : string default name for output.
+        @param buffshape : shape (numpy array) of the written buffer.
+        Must be 2D.
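+
+        Usage sketch, from a derived operator __init__ (file name and
+        buffer shape below are illustrative):
+        self._set_io('my_operator.dat', (1, 2))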
         """
-        shortName = str(self.__class__).rpartition('.')[-1][0:-2]
-        if self.discreteOperator is not None:
-            s = str(self.discreteOperator)
-        else:
-            s = shortName + " operator. Not discretised."
-        return s + "\n"
+        iopar = self.io_params
+        if iopar:
+            if isinstance(iopar, bool):
+                self.io_params = IO_params(filename)
+            self._writer = io.Writer(io_params=self.io_params,
+                                     mpi_params=self._mpis,
+                                     buffshape=buffshape)
 
-    def updateGhosts(self):
+    def task_id(self):
         """
-        Update ghost points values, if any.
+        @return id of the task on which this operator works.
         """
-        self.discreteOperator.updateGhosts()
+        return self._mpis.task_id
 
 
-class EmptyOperator(object):
-    """Empty operator"""
-    def __init__(self, *args, **kwargs):
-        pass
+import inspect
 
-    def apply(self, *args, **kwargs):
-        pass
+
+def opsetup(f):
+    """
+    Setup decorator: what must be done by all operators
+    at setup.
+    Usage : add @opsetup before setup class method
+    """
+
+    def decorator(*args, **kwargs):
+        # Job before setup of the function ...
+        # nothing for the moment
+        mro = inspect.getmro(args[0].setup.im_class)
+        # call the setup function
+        retval = f(*args, **kwargs)
+        # Warning : we cannot call super(...) since
+        # it leads to infinite cycling when setup
+        # is not defined in the class but in its
+        # base class and when this base class is derived
+        # from Computational ...
+        # So we directly call Computational.setup()
+        # It's ugly but it seems to work.
+        # Job after setup of the function ...
+        mro[-3].setup(args[0])
+        #super(args[0].__class__, args[0]).setup()
+        return retval
+
+    return decorator
+
+from parmepy.tools.profiler import ftime
+
+
+def opapply(f):
+    """
+    What must be done by all operators
+    before apply.
+    Usage : add @opapply before apply class method
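+
+    A minimal sketch (MyOp is hypothetical):
+
+        class MyOp(Computational):
+            @debug
+            @opapply
+            def apply(self, simulation=None):
+                pass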
+    """
+    def decorator(*args, **kwargs):
+        mro = inspect.getmro(args[0].apply.im_class)
+        mro[-3].setup(args[0])
+        #super(args[0].__class__, args[0]).apply()
+        t0 = ftime()
+        res = f(*args, **kwargs)
+        args[0].profiler[f.func_name] += ftime() - t0
+        return res
+
+    return decorator
+
+
+class Tools(object):
+    """
+    Static class with utilities related to operators
+    """
+
+    @staticmethod
+    def checkDevice(op):
+        """
+        @return true if op operates on a GPU
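+        Example (illustrative) : returns True when
+        op.method == {Support: 'gpu'}, False when op.method is None.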
+        """
+        from parmepy.methods_keys import Support
+
+        try:
+            is_device = \
+                op.method[Support].find('gpu') >= 0
+        except KeyError:  # op.method is a dict not containing Support in keys
+            is_device = False
+        except IndexError:  # op.method is a string
+            is_device = False
+        except TypeError:  # op.method is None
+            is_device = False
+        return is_device
diff --git a/HySoP/hysop/operator/curlAndDiffusion.py b/HySoP/hysop/operator/curlAndDiffusion.py
index b231b0f41793c95bd5dc2a6f5b621af4afa85d9c..861aa25971b737933c5d754229f1b54c34f6e6e1 100644
--- a/HySoP/hysop/operator/curlAndDiffusion.py
+++ b/HySoP/hysop/operator/curlAndDiffusion.py
@@ -12,6 +12,7 @@ except ImportError:
     from parmepy.fakef2py import fftw2py
 from parmepy.operator.discrete.diffusion_fft import DiffusionFFT
 from parmepy.constants import debug
+from parmepy.operator.continuous import opsetup
 
 
 class CurlDiffusion(Operator):
@@ -42,7 +43,8 @@ class CurlDiffusion(Operator):
                           Do not use it.")
 
     @debug
-    def setUp(self):
+    @opsetup
+    def setup(self, rwork=None, iwork=None):
         """
         Diffusion operator discretization method.
         Create a discrete Diffusion operator from given specifications.
@@ -68,10 +70,10 @@ class CurlDiffusion(Operator):
             vd = v.discretize(topo)
             self.discreteFields[v] = vd
 
-        self.discreteOperator =\
+        self.discrete_op =\
             DiffusionFFT(self.discreteFields[self.velocity],
                          self.discreteFields[self.vorticity],
                          self.method, **self.config)
 
-        self.discreteOperator.setUp()
-        self._isUpToDate = True
+        self.discrete_op.setup()
+        self._is_uptodate = True
diff --git a/HySoP/hysop/operator/density.py b/HySoP/hysop/operator/density.py
index 043bda26b2074821c531ab24defcc1d18f77bc25..de0dce2c7da34c83fa2c0a25193a7b94637fdcd6 100644
--- a/HySoP/hysop/operator/density.py
+++ b/HySoP/hysop/operator/density.py
@@ -3,23 +3,22 @@
 @file operator/density.py
 
 """
-from parmepy.operator.continuous import Operator
+from parmepy.operator.computational import Computational
 from parmepy.operator.discrete.density import DensityVisco_d
+from parmepy.operator.continuous import opsetup
 from parmepy.constants import debug
 
 
-class DensityVisco(Operator):
+class DensityVisco(Computational):
     """
-    Density and Viscosity reconstruction : operator representation
+    Density and Viscosity reconstruction
     """
 
     @debug
     def __init__(self, density, viscosity, **kwds):
         """
-        Constructor.
-        Reconstruct the Density and the Viscosity scalars.
-
-        @param velocity ContinuousVectorField : velocity variable.
+        @param density : scalar field
+        @param viscosity : scalar field
         """
         super(DensityVisco, self).__init__(variables=[density, viscosity],
                                            **kwds)
@@ -32,16 +31,15 @@ class DensityVisco(Operator):
         super(DensityVisco, self)._standard_discretize()
 
     @debug
-    def setUp(self):
+    @opsetup
+    def setup(self, rwork=None, iwork=None):
         """
         Density and Viscosity reconstruction operator discretization method.
         Create a discrete operator from given specifications.
         """
 
-        self.discreteOperator = \
-            DensityVisco_d(self.discreteFields[self.density],
-                           self.discreteFields[self.viscosity],
+        self.discrete_op = \
+            DensityVisco_d(density=self.discreteFields[self.density],
+                           viscosity=self.discreteFields[self.viscosity],
                            method=self.method)
-
-        self.discreteOperator.setUp()
-        self._isUpToDate = True
+        self._is_uptodate = True
diff --git a/HySoP/hysop/operator/differential.py b/HySoP/hysop/operator/differential.py
index d47664ee400364f68b8c180d7ff7aaf89283fb9e..406c33821a67e176f0e20a3691214503a8b3cc5e 100644
--- a/HySoP/hysop/operator/differential.py
+++ b/HySoP/hysop/operator/differential.py
@@ -3,20 +3,18 @@
 
 Differential operators
 """
-from parmepy.constants import debug, np
-from parmepy.operator.continuous import Operator
+from parmepy.constants import debug
+from parmepy.operator.computational import Computational
 from parmepy.operator.discrete.differential import CurlFFT, CurlFD, GradFD
 from parmepy.methods_keys import SpaceDiscretisation
-from parmepy.numerics.finite_differences import FD_C_4, FD_C_2
-try:
-    from parmepy.f2py import fftw2py
-except ImportError:
-    from parmepy.fakef2py import fftw2py
+from parmepy.operator.continuous import opsetup
+from parmepy.numerics.finite_differences import FD_C_4,\
+    FD_C_2, FiniteDifference
 import parmepy.default_methods as default
 from abc import ABCMeta, abstractmethod
 
 
-class Differential(Operator):
+class Differential(Computational):
     __metaclass__ = ABCMeta
 
     ## @debug
@@ -25,6 +23,7 @@ class Differential(Operator):
 
     @debug
     def __init__(self, invar, outvar, **kwds):
+        assert 'variables' not in kwds, 'variables parameter is useless.'
         super(Differential, self).__init__(variables=[invar, outvar], **kwds)
         if self.method is None:
             self.method = default.DIFFERENTIAL
@@ -34,89 +33,37 @@ class Differential(Operator):
         ## Curl of input
         self.outvar = outvar
 
-        if self.method[SpaceDiscretisation] is 'fftw':
-            self.resolution = self.resolutions[self.outvar]
-            assert self.resolution == self.resolutions[self.invar],\
-                'for fftw method, all variables must have\
-                the same global resolution.'
+        # Remark : for the time being, all variables must have the same
+        # topology. This is implicitly enforced by the assert on
+        # kwds['variables'] above: the only construction allowed is
+        # (invar=..., outvar=..., discretization=...)
         self.output = [outvar]
         self.input = [invar]
 
     def discretize(self):
 
-        domdim = self.domain.dimension
-        # Get main communicator
-        if self.topology is not None:
-            comm = self.topology.comm
-            commsize = self.topology.size
-        elif self._comm is not None:
-            comm = self._comm
-            commsize = self._comm.Get_size()
-        else:
-            from parmepy.mpi.main_var import main_comm as comm
-            from parmepy.mpi.main_var import main_size as commsize
-
         if self.method[SpaceDiscretisation] is 'fftw':
-            # FFTW case : init fftw (fortran) solver
-            localres, localoffset = fftw2py.init_fftw_solver(
-                self.resolution, self.domain.length, comm=comm.py2f())
-            topodims = np.ones(domdim)
-            topodims[-1] = commsize
-
-            # Case 1 : topology provided by user at init:
-            if self.topology is not None:
-                # Check if input topo is complient with fftw topo
-                assert (self.topology.shape == topodims).all(), 'input topology is\
-                        not compliant with fftw.'
-                self._discretize_single_topo()
-
-            else:
-                for v in self.variables:
-                    topo = self.domain.getOrCreateTopology(
-                        self.domain.dimension,
-                        self.resolution, topodims,
-                        precomputed=True,
-                        offset=localoffset,
-                        localres=localres,
-                        ghosts=self.ghosts,
-                        comm=self._comm)
-                    self.discreteFields[v] = v.discretize(topo)
-
-        elif str(self.method[SpaceDiscretisation]).find('FD_C') != -1:
+            super(Differential, self)._fftw_discretize()
+
+        elif self.method[SpaceDiscretisation] is FD_C_4:
             # Finite differences method
             # Minimal number of ghost points
-            if self.method[SpaceDiscretisation] is FD_C_4:
-                nbGhosts = 2
-            elif self.method[SpaceDiscretisation] is FD_C_2:
-                nbGhosts = 1
-
-            # Case 1 : topology provided by user at init:
-            if self.topology is not None:
-                self._discretize_single_topo(nbGhosts)
-
-            else:
-                if self.ghosts is None:
-                    self.ghosts = np.asarray([nbGhosts] * domdim)
-                else:
-                    assert (self.ghosts >= nbGhosts).all()
-
-                for v in self.variables:
-                    topo = self.domain.getOrCreateTopology(domdim,
-                                                           self.resolutions[v],
-                                                           ghosts=self.ghosts,
-                                                           comm=self._comm)
-                    self.discreteFields[v] = v.discretize(topo)
+            nbGhosts = 2
+            super(Differential, self)._standard_discretize(nbGhosts)
+        elif self.method[SpaceDiscretisation] is FD_C_2:
+            nbGhosts = 1
+            super(Differential, self)._standard_discretize(nbGhosts)
 
         else:
             raise ValueError("Unknown method for space discretization of the\
                 differential operator.")
 
-        assert self.discreteFields[self.invar].topology == \
-            self.discreteFields[self.outvar].topology, \
-            'Operator not yet implemented for multiple resolutions.'
+        msg = 'Operator not yet implemented for multiple resolutions.'
+        assert self._single_topo, msg
 
     @abstractmethod
-    def setUp(self):
+    @opsetup
+    def setup(self, rwork=None, iwork=None):
         """
         Last step of initialization. After this, the operator must be
         ready for apply call.
@@ -129,42 +76,40 @@ class Curl(Differential):
     """
     Computes \f$ outVar = \nabla inVar \f$
     """
+    def get_work_properties(self):
+        if not self._is_discretized:
+            msg = 'The operator must be discretized '
+            msg += 'before any call to this function.'
+            raise RuntimeError(msg)
+        res = {'rwork': None, 'iwork': None}
+        if self.method[SpaceDiscretisation].mro()[1] is FiniteDifference:
+            from parmepy.numerics.differential_operations \
+                import Curl as NumCurl
+            work_length = NumCurl.getWorkLengths()
+            shape = self.discreteFields[self.invar].data[0].shape
+            res['rwork'] = []
+            for _ in xrange(work_length):
+                res['rwork'].append(shape)
+        return res
 
     @debug
-    def setUp(self):
+    @opsetup
+    def setup(self, rwork=None, iwork=None):
         # Create a discrete operator, according to the chosen method
-        assert self.domain.dimension == 3, "Not yet implemented for dim < 3"
-        # todo : implement a 'false' 2D curl.
         if self.method[SpaceDiscretisation] is 'fftw':
-            self.discreteOperator = CurlFFT(self.discreteFields[self.invar],
-                                            self.discreteFields[self.outvar],
-                                            method=self.method)
-        elif str(self.method[SpaceDiscretisation]).find('FD_C') != -1:
-            self.discreteOperator = CurlFD(self.discreteFields[self.invar],
-                                           self.discreteFields[self.outvar],
-                                           method=self.method)
+            self.discrete_op = CurlFFT(
+                invar=self.discreteFields[self.invar],
+                outvar=self.discreteFields[self.outvar],
+                method=self.method, rwork=rwork)
+        elif self.method[SpaceDiscretisation].mro()[1] is FiniteDifference:
+            self.discrete_op = CurlFD(
+                invar=self.discreteFields[self.invar],
+                outvar=self.discreteFields[self.outvar],
+                method=self.method)
         else:
             raise ValueError("The required Space Discretisation is\
                 not available for Curl.")
-        self.discreteOperator.setUp()
-        self._isUpToDate = True
-
-    @staticmethod
-    def getWorkLengths(method=None, domain_dim=None):
-        """
-        Return the length of working arrays lists required
-        for curl discrete operator
-        @param method: useless, just there to fit with base class interface.
-        @param domain_dim : useless, just there
-        to fit with base class interface.
-        """
-        if method[SpaceDiscretisation] is FD_C_4:
-            return CurlFD.getWorkLengths()
-        else:
-            return 0
-
-    def setWorks(self, rwork=None, iwork=None):
-        self.discreteOperator.setWorks(rwork, iwork)
+        self._is_uptodate = True
 
 
 class Grad(Differential):
@@ -173,15 +118,15 @@ class Grad(Differential):
     """
 
     @debug
-    def setUp(self):
-        if str(self.method[SpaceDiscretisation]).find('FD_C') == -1:
+    @opsetup
+    def setup(self, rwork=None, iwork=None):
+        if self.method[SpaceDiscretisation].mro()[1] is not FiniteDifference:
             raise ValueError("Grad operator only\
                 implemented with finite differences. Please change\
                 method[SpaceDiscretisation] value.")
         # Create a discrete operator, according to the chosen method
         # (only finite differences at the time).
-        self.discreteOperator = GradFD(self.discreteFields[self.invar],
-                                       self.discreteFields[self.outvar],
-                                       method=self.method)
-        self.discreteOperator.setUp()
-        self._isUpToDate = True
+        self.discrete_op = GradFD(invar=self.discreteFields[self.invar],
+                                  outvar=self.discreteFields[self.outvar],
+                                  method=self.method, rwork=rwork)
+        self._is_uptodate = True
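
A usage sketch of the work-array protocol introduced above. The fields velo
and vorti and the discretization d3d are assumed to exist; only the
get_work_properties/setup sequence is taken from the patch:

    import parmepy.tools.numpywrappers as npw

    curl = Curl(invar=velo, outvar=vorti, discretization=d3d)
    curl.discretize()

    # ask the operator what it needs before allocating anything
    props = curl.get_work_properties()
    rwork = None
    if props['rwork'] is not None:
        # one array per required shape; these may be shared with other
        # operators that request compatible shapes
        rwork = [npw.zeros(shape) for shape in props['rwork']]
    curl.setup(rwork=rwork)
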
diff --git a/HySoP/hysop/operator/diffusion.py b/HySoP/hysop/operator/diffusion.py
index fc5d7436146d469bb2717d7b4e1086a5cecb525d..1d118506e360ddf0a4f93437c659489bf9ff87dd 100644
--- a/HySoP/hysop/operator/diffusion.py
+++ b/HySoP/hysop/operator/diffusion.py
@@ -5,19 +5,15 @@
 Operator for diffusion problem.
 
 """
-from parmepy.operator.continuous import Operator
-try:
-    from parmepy.f2py import fftw2py
-except ImportError:
-    from parmepy.fakef2py import fftw2py
+from parmepy.operator.computational import Computational
 from parmepy.operator.discrete.diffusion_fft import DiffusionFFT
 from parmepy.constants import debug
+from parmepy.operator.continuous import opsetup
 from parmepy.methods_keys import SpaceDiscretisation
-import numpy as np
 import parmepy.default_methods as default
 
 
-class Diffusion(Operator):
+class Diffusion(Computational):
     """
     Diffusion operator
     \f{eqnarray*}
@@ -29,81 +25,56 @@ class Diffusion(Operator):
     """
 
     @debug
-    def __init__(self, vorticity, viscosity, **kwds):
+    def __init__(self, viscosity, vorticity=None, **kwds):
         """
         Constructor for the diffusion operator.
-        @param[in,out] vorticity : field \f$ \omega \f$
-        @param[in] resolution :  \f$ \omega \f$ global resolution.
-        @param[in] viscosity : \f$\nu\f$, viscosity of the considered medium.
+        @param[in,out] vorticity : vorticity field. If None, it must be passed
+        through the variables argument.
+        @param[in] viscosity : viscosity of the considered medium.
         """
-        super(Diffusion, self).__init__(variables=[vorticity], **kwds)
+        if vorticity is not None:
+            super(Diffusion, self).__init__(variables=[vorticity], **kwds)
+        else:
+            super(Diffusion, self).__init__(**kwds)
+
         # The only available method at the time is fftw
         if self.method is None:
             self.method = default.DIFFUSION
-
         ## input/output field, solution of the problem
-        self.vorticity = vorticity
+        if vorticity is not None:
+            self.vorticity = vorticity
+        else:
+            self.vorticity = self.variables.keys()[0]
         ## viscosity
         self.viscosity = viscosity
 
+        self.kwds = kwds
+
         self.input = [self.vorticity]
         self.output = [self.vorticity]
 
     def discretize(self):
-        # The only available solver is fftw
-        if self.method[SpaceDiscretisation] is not 'fftw':
-            print (self.method)
-            raise AttributeError("Method not yet implemented.")
-
-        # Compute local resolution/distribution of data
-        # according to fftw requirements.
-        if self.topology is not None:
-            comm = self.topology.comm
-        elif self._comm is not None:
-            comm = self._comm
+        if self.method[SpaceDiscretisation] == 'fftw':
+            super(Diffusion, self)._fftw_discretize()
+        elif self.method[SpaceDiscretisation] == 'fd':
+            super(Diffusion, self)._standard_discretize()
         else:
-            from parmepy.mpi.main_var import main_comm as comm
-        localres, localoffset = fftw2py.init_fftw_solver(
-            self.resolutions[self.vorticity],
-            self.domain.length, comm=comm.py2f())
-
-        if self.topology is not None:
-            main_size = self.topology.size
-        elif self._comm is not None:
-            main_size = self._comm.Get_size()
-        else:
-            from parmepy.mpi.main_var import main_size
-        topodims = np.ones((self.domain.dimension))
-        topodims[-1] = main_size
-        #variables discretization
-        if self.ghosts is not None:
-            raise AttributeError("Ghosts points not yet\
-            implemented for diffusion operator.")
-        if self.topology is not None:
-            assert (self.topology.shape == topodims).all(), 'input topology is\
-                    not compliant with fftw.'
-            for v in self.variables:
-                self.discreteFields[v] = v.discretize(self.topology)
-        else:
-            for v in self.variables:
-                topo = self.domain.getOrCreateTopology(
-                    self.domain.dimension,
-                    self.resolutions[self.vorticity], topodims,
-                    precomputed=True,
-                    offset=localoffset,
-                    localres=localres,
-                    ghosts=self.ghosts,
-                    comm=self._comm)
-                self.discreteFields[v] = v.discretize(topo)
+            raise AttributeError("Method not yet implemented.")
 
     @debug
-    def setUp(self):
-        """
-        Diffusion operator setup : discretization of fields and operator.
-        """
-        self.discreteOperator = DiffusionFFT(
-            self.discreteFields[self.vorticity], self.viscosity,
-            method=self.method)
-
-        self.discreteOperator.setUp()
-        self._isUpToDate = True
+    @opsetup
+    def setup(self, rwork=None, iwork=None):
+        if self.method[SpaceDiscretisation] == 'fftw':
+            self.discrete_op = DiffusionFFT(
+                self.discreteFields[self.vorticity], self.viscosity,
+                method=self.method)
+        elif self.method[SpaceDiscretisation] == 'fd':
+            from parmepy.gpu.gpu_diffusion import GPUDiffusion
+            kw = self.kwds.copy()
+            if 'discretization' in kw:
+                kw.pop('discretization')
+            self.discrete_op = GPUDiffusion(
+                self.discreteFields[self.vorticity],
+                viscosity=self.viscosity,
+                **kw)
+        self._is_uptodate = True
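
After this change the constructor accepts the vorticity either directly or
through the generic variables argument. A sketch, assuming an existing
vorticity field w and a discretization d3d; the dict form of variables is
inferred from the call to self.variables.keys()[0] above:

    # direct form
    diff_op = Diffusion(viscosity=1e-3, vorticity=w, discretization=d3d)
    # generic form, passing the field through 'variables'
    diff_op = Diffusion(viscosity=1e-3, variables={w: d3d})
    diff_op.discretize()
    diff_op.setup()
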
diff --git a/HySoP/hysop/operator/discrete/__init__.py b/HySoP/hysop/operator/discrete/__init__.py
index 4425fd47108c3b15af3ffff11dd05f14ae2c75ee..0d41efad13d2a33642a49b12866e042505c64452 100644
--- a/HySoP/hysop/operator/discrete/__init__.py
+++ b/HySoP/hysop/operator/discrete/__init__.py
@@ -15,10 +15,10 @@
 #                                scal: nbElem},
 #                   method = 'scales, p_M4',)
 # ...
-# advec.setUp()
+# advec.setup()
 # ...
 # advec.apply()
 # \endcode
 #
-# setUp call will result in the creation of a ScalesAdvection operator
+# setup call will result in the creation of a ScalesAdvection operator
 # and apply will perform a call to scale's solver.
diff --git a/HySoP/hysop/operator/discrete/adapt_timestep.py b/HySoP/hysop/operator/discrete/adapt_timestep.py
index 8c2b38e477348eb89728d042ceb1271defaaf6ae..e1ff050e8a2bd6eb30da7da8a0f37ad6d017f9ee 100755
--- a/HySoP/hysop/operator/discrete/adapt_timestep.py
+++ b/HySoP/hysop/operator/discrete/adapt_timestep.py
@@ -8,18 +8,13 @@ Evaluation of the adaptative time step according to the flow fields.
 from parmepy.constants import debug
 from parmepy.methods_keys import TimeIntegrator, SpaceDiscretisation,\
     dtCrit
-from parmepy.numerics.finite_differences import FD_C_4
 from parmepy.operator.discrete.discrete import DiscreteOperator
-from parmepy.numerics.integrators.euler import Euler
-from parmepy.numerics.integrators.runge_kutta2 import RK2
-from parmepy.numerics.integrators.runge_kutta3 import RK3
-from parmepy.numerics.integrators.runge_kutta4 import RK4
 from parmepy.numerics.differential_operations import GradV
 import parmepy.tools.numpywrappers as npw
-from parmepy.numerics.updateGhosts import UpdateGhosts
+from parmepy.numerics.update_ghosts import UpdateGhosts
 from parmepy.mpi import MPI
 from parmepy.constants import np, PARMES_MPI_REAL
-import parmepy.tools.io_utils as io
+from parmepy.tools.profiler import profile
 
 
 class AdaptTimeStep_D(DiscreteOperator):
@@ -30,22 +25,16 @@ class AdaptTimeStep_D(DiscreteOperator):
     """
 
     @debug
-    def __init__(self, velocity, vorticity, dt_adapt, method=None,
-                 lcfl=0.125, cfl=0.5, io_params=None, time_range=None):
+    def __init__(self, velocity, vorticity, simulation,
+                 lcfl=0.125, cfl=0.5, time_range=None, maxdt=9999., **kwds):
         """
-        @param velocity : discrete field
-        @param vorticity : discrete field
+        @param velocity : discretization of the velocity field
+        @param vorticity : discretization of the vorticity field
         @param dt_adapt : adaptative timestep
         (a parmepy.variable_parameter.VariableParameter)
-        @param method : numerical method for space/time discretizations
         @param lcfl : the lagrangian CFL coefficient used
         for advection stability
         @param cfl : the CFL coefficient.
-        @param filename : output file name
-        @param io_params : parameters (dict) to set file output.
-        If  None, no output. Set io_params = {} if you want output,
-        with default parameters values.
-        See parmepy.tools.io_utils.Writer for details
         @param time_range : [start, end] use to define a 'window' in which
         the current operator is applied. Outside start-end, this operator
         has no effect. Start/end are iteration numbers.
@@ -56,41 +45,21 @@ class AdaptTimeStep_D(DiscreteOperator):
         ## vorticity discrete field
         self.vorticity = vorticity
         ## adaptative time step variable
-        self.dt_adapt = dt_adapt
-        if method is None:
-            method = {TimeIntegrator: RK3, SpaceDiscretisation: FD_C_4,
-                      dtCrit: 'vort'}
-        DiscreteOperator.__init__(self, [self.velocity, self.vorticity],
-                                  method=method)
+        from parmepy.problem.simulation import Simulation
+        assert isinstance(simulation, Simulation)
+        self.simulation = simulation
+        assert 'variables' not in kwds, 'variables parameter is useless.'
+        super(AdaptTimeStep_D, self).__init__(variables=[velocity, vorticity],
+                                              **kwds)
 
-        self.input = [self.velocity, self.vorticity]
+        self.input = self.variables
         self.output = [self.vorticity]
-        topo = self.velocity.topology
-        io_params["writebuffshape"] = (1, 7)
-        if io_params is None:
-            self._writer = None
-        else:
-            if not "filename" in io_params:
-                io_params["filename"] = "dt_adapt"
-            if topo is not None:
-                self._writer = io.Writer(io_params, topo.comm)
-
-        ## Courant Fredrich Levy constants
-        self.lcfl = lcfl
+        ## Courant-Friedrichs-Lewy (CFL) coefficient
         self.cfl = cfl
-
-        # Definition of stability coefficient for stretching operator
-        self.cststretch = 0.
-        timeint = self.method[TimeIntegrator]
-        classtype = timeint.mro()[0]
-        if classtype is Euler:
-            self.coef_stretch = 2.0
-        elif classtype is RK2:
-            self.coef_stretch = 2.0
-        elif classtype is RK3:
-            self.coef_stretch = 2.5127
-        elif classtype is RK4:
-            self.coef_stretch = 2.7853
+        ## Lagrangian CFL coefficient
+        self.lcfl = lcfl
+        ## Max. timestep
+        self.maxdt = maxdt
 
         # Definition of criterion for dt_advec computation
         self.dtCrit = self.method[dtCrit]
@@ -105,7 +74,7 @@ class AdaptTimeStep_D(DiscreteOperator):
         # [time, dt, d1, d2, d3, d4, d5]
         # for d1...d5 see computation details in apply.
         self.diagnostics = npw.zeros((7))
-        self._t_diagnostics = np.empty_like(self.diagnostics)
+        self._t_diagnostics = npw.zeros_like(self.diagnostics)
 
         # All diagnostics function definitions:
         # (Index in self.diagnostics, function, is gradU needed)
@@ -129,7 +98,49 @@ class AdaptTimeStep_D(DiscreteOperator):
 
         # Definition of dt:
         self.get_all_dt = []
+        self._prepare_dt_list()
+
+        # prepare ghost points synchro for velocity
+        self._synchronize = UpdateGhosts(self.velocity.topology,
+                                         self.velocity.nbComponents)
+        # gradU function
+        self._function = GradV(self.velocity.topology,
+                               method=self.method[SpaceDiscretisation])
+
+    def _set_work_arrays(self, rwork=None, iwork=None):
+        memshape = self.velocity.data[0].shape
+        worklength = self.velocity.nbComponents ** 2
+        # rwork is used to save gradU
+        if rwork is None:
+            self._rwork = [npw.zeros(memshape) for _ in xrange(worklength)]
+
+        else:
+            assert isinstance(rwork, list), 'rwork must be a list.'
+            self._rwork = rwork
+            assert len(self._rwork) == worklength
+            for wk in self._rwork:
+                assert wk.shape == memshape
 
+    @staticmethod
+    def _compute_stability_coeff(timeint):
+        from parmepy.numerics.integrators.euler import Euler
+        from parmepy.numerics.integrators.runge_kutta2 import RK2
+        from parmepy.numerics.integrators.runge_kutta3 import RK3
+        from parmepy.numerics.integrators.runge_kutta4 import RK4
+        # Definition of stability coefficient for stretching operator
+        coef_stretch = 0.0
+        classtype = timeint.mro()[0]
+        if classtype is Euler:
+            coef_stretch = 2.0
+        elif classtype is RK2:
+            coef_stretch = 2.0
+        elif classtype is RK3:
+            coef_stretch = 2.5127
+        elif classtype is RK4:
+            coef_stretch = 2.7853
+        return coef_stretch
+
+    def _prepare_dt_list(self):
         # definition of dt_advection
         if 'gradU' in self.dtCrit:
             # => based on gradU
@@ -144,33 +155,20 @@ class AdaptTimeStep_D(DiscreteOperator):
             self.get_all_dt.append(
                 lambda diagnostics: self.lcfl / diagnostics[5])
         if 'stretch' in self.dtCrit:
+            coeff_stretch = self._compute_stability_coeff(
+                self.method[TimeIntegrator])
             self.get_all_dt.append(
-                lambda diagnostics: self.coef_stretch / diagnostics[3])
+                lambda diagnostics: coeff_stretch / diagnostics[3])
         if 'cfl' in self.dtCrit:
             h = self.velocity.topology.mesh.space_step[0]
             self.get_all_dt.append(
                 lambda diagnostics: (self.cfl * h) / diagnostics[4])
 
-    def setUp(self):
-
-        # prepare ghost points synchro for velocity
-        self._synchronize = UpdateGhosts(self.velocity.topology,
-                                         self.velocity.nbComponents)
-        # gradU function
-        self._function = GradV(self.velocity.topology,
-                               method=self.method[SpaceDiscretisation])
-        memshape = self.velocity.data[0].shape
-        worklength = self.velocity.nbComponents ** 2
-        # gradU result array.
-        self.grad = [npw.zeros(memshape) for i in xrange(worklength)]
-
-        self._isUpToDate = True
-
     def _gradU(self):
         # Synchronize ghost points of velocity
         self._synchronize(self.velocity.data)
         # gradU computation
-        self.grad = self._function(self.velocity.data, self.grad)
+        self._rwork = self._function(self.velocity.data, self._rwork)
 
     def _compute_gradU(self):
         res = 0.
@@ -178,7 +176,8 @@ class AdaptTimeStep_D(DiscreteOperator):
         for direc in xrange(nbComponents):
             # maxima of partial derivatives of velocity :
             # needed for advection stability condition (1st option)
-            res = max(res, np.max(abs(self.grad[(nbComponents + 1) * direc])))
+            res = max(res, np.max(abs(self._rwork[(nbComponents + 1)
+                                                  * direc])))
         return res
 
     def _compute_stretch(self):
@@ -187,10 +186,9 @@ class AdaptTimeStep_D(DiscreteOperator):
         for direc in xrange(nbComponents):
             # maxima of partial derivatives of velocity:
             # needed for stretching stability condition
-            tmp = np.max(sum([abs(self.grad[i])
+            tmp = np.max(sum([abs(self._rwork[i])
                               for i in xrange(nbComponents * direc,
-                                              nbComponents * (direc + 1))
-                              ]))
+                                              nbComponents * (direc + 1))]))
             res = max(res, tmp)
         return res
 
@@ -205,38 +203,39 @@ class AdaptTimeStep_D(DiscreteOperator):
 
     def _compute_deform(self):
         # 1/2(gradU + gradU^T) computation
-        self.grad[1] += self.grad[3]
-        self.grad[2] += self.grad[6]
-        self.grad[5] += self.grad[7]
-        self.grad[1] *= 0.5
-        self.grad[2] *= 0.5
-        self.grad[5] *= 0.5
-        self.grad[3][...] = self.grad[1][...]
-        self.grad[6][...] = self.grad[2][...]
-        self.grad[7][...] = self.grad[5][...]
+        self._rwork[1] += self._rwork[3]
+        self._rwork[2] += self._rwork[6]
+        self._rwork[5] += self._rwork[7]
+        self._rwork[1] *= 0.5
+        self._rwork[2] *= 0.5
+        self._rwork[5] *= 0.5
+        self._rwork[3][...] = self._rwork[1][...]
+        self._rwork[6][...] = self._rwork[2][...]
+        self._rwork[7][...] = self._rwork[5][...]
         # maxima of deformation tensor:
         # needed for advection stability condition (3rd option)
         res = 0.
         nbComponents = self.velocity.nbComponents
         for direc in xrange(nbComponents):
-            tmp = np.max(sum([abs(self.grad[i])
+            tmp = np.max(sum([abs(self._rwork[i])
                               for i in xrange(nbComponents * direc,
                                               nbComponents * (direc + 1))
                               ]))
             res = max(res, tmp)
         return res
 
+    @debug
+    @profile
     def apply(self, simulation=None):
-        if simulation is None:
-            raise ValueError("Missing simulation value for computation.")
+        if simulation is not None:
+            assert self.simulation is simulation
+
         # current time
-        time = simulation.time
-        iteration = simulation.currentIteration
-        Nmax = min(simulation.iterMax, self.time_range[1])
+        time = self.simulation.time
+        iteration = self.simulation.currentIteration
+        Nmax = min(self.simulation.iterMax, self.time_range[1])
         self.diagnostics[0] = time
         if iteration >= self.time_range[0] and iteration <= Nmax:
-            # Calling for requirements completion
-            DiscreteOperator.apply(self, simulation)
             if self._is_gradU_needed:
                 self._gradU()
             for func in self._used_functions:
@@ -249,13 +248,14 @@ class AdaptTimeStep_D(DiscreteOperator):
                 op=MPI.MAX)
             self.diagnostics[...] = self._t_diagnostics
 
-            self.dt_adapt['dt'] = np.min(
-                [dt(self.diagnostics) for dt in self.get_all_dt])
-
-            self.diagnostics[1] = self.dt_adapt['dt']
-            if self._writer is not None and self._writer.doWrite(iteration):
+            dt = np.min([crit(self.diagnostics) for crit in self.get_all_dt] +
+                        [self.maxdt])
+            self.diagnostics[1] = dt
+            if self._writer is not None and self._writer.do_write(iteration):
                 self._writer.buffer[0, :] = self.diagnostics
                 self._writer.write()
 
             # Update simulation time step with the new dt
-            simulation.updateTimeStep(self.dt_adapt['dt'])
+            self.simulation.updateTimeStep(dt)
+            # Warning: this update is done only for the current MPI task!
+            # See wait function in base class.
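
A toy numeric check of the time-step selection above (pure Python, no MPI).
The diagnostics indices (d3 = stretching max, d4 = velocity max, d5 = gradU
max) follow the comments in this file; the values are made up:

    lcfl, cfl, h, maxdt = 0.125, 0.5, 0.01, 1.0
    coef_stretch = 2.5127             # RK3 value, see _compute_stability_coeff
    d3, d4, d5 = 4.0, 2.0, 8.0        # hypothetical reduced diagnostics

    candidates = [lcfl / d5,          # 'gradU' criterion
                  coef_stretch / d3,  # 'stretch' criterion
                  cfl * h / d4,       # 'cfl' criterion
                  maxdt]              # user-imposed upper bound
    dt = min(candidates)              # -> 0.0025, the cfl bound wins here
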
diff --git a/HySoP/hysop/operator/discrete/baroclinic.py b/HySoP/hysop/operator/discrete/baroclinic.py
index 54fe7839db40f8100fa26695aebf625e43d820ee..63f926fe2b2e57d6628f0ce7c1f8cdfec59a0438 100644
--- a/HySoP/hysop/operator/discrete/baroclinic.py
+++ b/HySoP/hysop/operator/discrete/baroclinic.py
@@ -1,78 +1,68 @@
 # -*- coding: utf-8 -*-
 """
-@file operator/discrete/multiphase.py
+@file operator/discrete/baroclinic.py
 Discrete MultiPhase Rot Grad P
 """
 from parmepy.operator.discrete.discrete import DiscreteOperator
 import parmepy.numerics.differential_operations as diff_op
-from parmepy.constants import np, debug, XDIR, YDIR, ZDIR, \
-    PARMES_REAL, ORDER
+from parmepy.constants import debug, XDIR, YDIR, ZDIR
 from parmepy.methods_keys import SpaceDiscretisation
-from parmepy.numerics.finite_differences import FD_C_4, FD_C_2
-from parmepy.numerics.updateGhosts import UpdateGhosts
-from parmepy.tools.timers import timed_function
+from parmepy.numerics.update_ghosts import UpdateGhosts
+from parmepy.tools.profiler import ftime
+import parmepy.tools.numpywrappers as npw
 
 
-class Baroclinic_d(DiscreteOperator):
+class Baroclinic(DiscreteOperator):
     """
-
+    TODO : describe this operator ...
     """
     @debug
     def __init__(self, velocity, vorticity, density, viscosity,
-                 method={SpaceDiscretisation: FD_C_2}, formula=None):
+                 formula=None, **kwds):
         """
         Constructor.
         Create the baroclinic term -GradRho/rho x GradP/rho
         in N.S equation
-        @param velocity : discrete vector field
-        @param vorticity : discrete vector field
-        @param density : discrete scalar field
+        @param velocity : discretization of the velocity field
+        @param vorticity : discretization of the vorticity field
+        @param density : discretization of a scalar field
         @param viscosity
-        @param method : numerical method for space discretizations
-        @param formula : formula to initialize u^(n-1) 
-        Note : this should be the formula used to initialize 
+        @param formula : formula to initialize u^(n-1)
+        Note : this should be the formula used to initialize
         the velocity field
-
         """
-        DiscreteOperator.__init__(self, [velocity, vorticity, density,
-                                         viscosity], method=method)
+        assert 'variables' not in kwds, 'variables parameter is useless.'
+        if 'method' not in kwds:
+            import parmepy.default_methods as default
+            kwds['method'] = default.BAROCLINIC
 
+        super(Baroclinic, self).__init__(variables=[velocity, vorticity,
+                                                    density], **kwds)
         self.velocity = velocity
-        self.velocity_old = np.zeros_like(self.velocity.data, 
-                                          dtype=PARMES_REAL, 
-                                          order=ORDER)
+        self.velocity_old = npw.zeros_like(self.velocity.data)
         self.vorticity = vorticity
         self.density = density
         self.viscosity = viscosity
-        self.formula = formula
         self.input = [self.velocity, self.vorticity, self.density]
         self.output = [self.vorticity]
 
-        # prepare ghost points synchro for velocity (vector) 
+        # prepare ghost points synchro for velocity (vector)
         # and density (scalar) fields
         self._synchronizeVel = UpdateGhosts(self.velocity.topology,
                                             self.velocity.nbComponents)
         self._synchronizeRho = UpdateGhosts(self.density.topology,
                                             self.density.nbComponents)
 
-    def setUp(self):
-        """
-        u^(n-1) initialization : u^(n-1) = u(t=0).
-        """
-        currentTime = 0.
-        arg_list = self.velocity.topology.mesh.coords + (currentTime,)
-        self.velocity_old = self.formula(self.velocity_old, *arg_list)
+        # u^(n-1) initialization : u^(n-1) = u(t=0).
+        time = 0.
+        arg_list = self.velocity.topology.mesh.coords + (time,)
+        self.velocity_old = formula(self.velocity_old, *arg_list)
 
     @debug
-    @timed_function
     def apply(self, simulation=None):
         if simulation is None:
             raise ValueError("Missing simulation value for computation.")
 
-        # Calling for requirements completion
-        DiscreteOperator.apply(self, simulation)
-
-        t = simulation.time
         dt = simulation.timeStep
         # Synchronize ghost points of velocity and density
         self._synchronizeVel(self.velocity.data)
@@ -82,44 +72,40 @@ class Baroclinic_d(DiscreteOperator):
         iCompute = topo.mesh.iCompute
         gradOp = diff_op.GradS(topo, self.method[SpaceDiscretisation])
 
-        result = np.zeros_like(self.velocity.data, 
-                               dtype=PARMES_REAL, order=ORDER)
+        result = npw.zeros_like(self.velocity.data)
 
         # result = du/dt
         for d in xrange(self.velocity.dimension):
-            result[d][iCompute] = (self.velocity[d][iCompute] - 
+            result[d][iCompute] = (self.velocity[d][iCompute] -
                                    self.velocity_old[d][iCompute]) / dt
 
         # result = result + (u. grad)u
         # (u. grad)u = (u.du/dx + v.du/dy + w.du/dz ;
         #               u.dv/dx + v.dv/dy + w.dv/dz ;
         #               u.dw/dx + v.dw/dy + w.dw/dz)
-        UgradU = np.zeros_like(self.velocity.data, 
-                               dtype=PARMES_REAL, order=ORDER)
-        tempGrad = np.zeros_like(self.velocity.data, 
-                                 dtype=PARMES_REAL, order=ORDER)
+        UgradU = npw.zeros_like(self.velocity.data)
+        tempGrad = npw.zeros_like(self.velocity.data)
         tempGrad = gradOp(self.velocity[XDIR], tempGrad)
         for d in xrange(self.velocity.dimension):
-            UgradU[XDIR][iCompute] += self.velocity[d][iCompute] \
-                                      * tempGrad[d][iCompute]
+            UgradU[XDIR][iCompute] += \
+                self.velocity[d][iCompute] * tempGrad[d][iCompute]
         tempGrad = gradOp(self.velocity[YDIR], tempGrad)
         for d in xrange(self.velocity.dimension):
-            UgradU[YDIR][iCompute] += self.velocity[d][iCompute] \
-                                      * tempGrad[d][iCompute]
+            UgradU[YDIR][iCompute] += \
+                self.velocity[d][iCompute] * tempGrad[d][iCompute]
         tempGrad = gradOp(self.velocity[ZDIR], tempGrad)
         for d in xrange(self.velocity.dimension):
-            UgradU[ZDIR][iCompute] += self.velocity[d][iCompute] \
-                                      * tempGrad[d][iCompute]
+            UgradU[ZDIR][iCompute] += \
+                self.velocity[d][iCompute] * tempGrad[d][iCompute]
 
         for d in xrange(self.velocity.dimension):
             result[d][iCompute] += UgradU[d][iCompute]
 
         ## result = result - nu*\Laplacian u (-g) = gradP/rho
-        viscousTerm = np.zeros_like(self.velocity.data, 
-                                    dtype=PARMES_REAL, order=ORDER)
+        viscousTerm = npw.zeros_like(self.velocity.data)
 
         laplacian = diff_op.Laplacian(topo)
-        fd_scheme = FD_C_2((topo.mesh.space_step))
+        # fd_scheme = FD_C_2((topo.mesh.space_step))
         laplacian.fd_scheme.computeIndices(iCompute)
 
         for d in xrange(self.velocity.dimension):
@@ -132,8 +118,7 @@ class Baroclinic_d(DiscreteOperator):
 #                result[d][iCompute] -= -1.0
 
         ## baroclinicTerm = -(gradRho/rho) x (gradP/rho)
-        gradRho_rho = np.zeros_like(self.velocity.data, 
-                                    dtype=PARMES_REAL, order=ORDER)
+        gradRho_rho = npw.zeros_like(self.velocity.data)
         gradRho_rho = gradOp(self.density[0], gradRho_rho)
 
         ## To comment out if the advected scalar is log(rho) :
@@ -141,20 +126,16 @@ class Baroclinic_d(DiscreteOperator):
 #            gradRho_rho[d][iCompute] = gradRho_rho[d][iCompute] / \
 #                                       self.density[0][iCompute]
 
-        baroclinicTerm = np.zeros_like(result, dtype=PARMES_REAL, 
-                                       order=ORDER)
+        baroclinicTerm = npw.zeros_like(result)
         baroclinicTerm[0][iCompute] = - gradRho_rho[1][iCompute] * \
-                                        result[2][iCompute] + \
-                                      gradRho_rho[2][iCompute] * \
-                                        result[1][iCompute]
+            result[2][iCompute] + gradRho_rho[2][iCompute] \
+            * result[1][iCompute]
         baroclinicTerm[1][iCompute] = - gradRho_rho[2][iCompute] * \
-                                        result[0][iCompute] + \
-                                      gradRho_rho[0][iCompute] * \
-                                        result[2][iCompute]
+            result[0][iCompute] + gradRho_rho[0][iCompute] * \
+            result[2][iCompute]
         baroclinicTerm[2][iCompute] = - gradRho_rho[0][iCompute] * \
-                                        result[1][iCompute] + \
-                                      gradRho_rho[1][iCompute] * \
-                                        result[0][iCompute]
+            result[1][iCompute] + gradRho_rho[1][iCompute] * \
+            result[0][iCompute]
 
         ## vorti(n+1) = vorti(n) + dt * baroclinicTerm
         for d in xrange(self.vorticity.dimension):
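
The three component assignments for baroclinicTerm above spell out the cross
product -(gradRho/rho) x (gradP/rho); a quick numpy sanity check at a single
point, with made-up values:

    import numpy as np

    g = np.array([1.0, 2.0, 3.0])    # stands for gradRho_rho
    r = np.array([0.5, -1.0, 2.0])   # stands for result (= gradP/rho)
    manual = np.array([-g[1] * r[2] + g[2] * r[1],
                       -g[2] * r[0] + g[0] * r[2],
                       -g[0] * r[1] + g[1] * r[0]])
    assert np.allclose(manual, -np.cross(g, r))
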
diff --git a/HySoP/hysop/operator/discrete/density.py b/HySoP/hysop/operator/discrete/density.py
index 7d40d8fba021f8342607bef1816075d589cad5d9..71b91f6f07de5829623f6583a087bbb4b4df3402 100644
--- a/HySoP/hysop/operator/discrete/density.py
+++ b/HySoP/hysop/operator/discrete/density.py
@@ -3,55 +3,57 @@
 @file operator/discrete/density.py
 Discrete MultiPhase Rot Grad P
 """
-from discrete import DiscreteOperator
+from parmepy.operator.discrete.discrete import DiscreteOperator
 from parmepy.constants import np, debug
-from parmepy.tools.timers import timed_function
+from parmepy.tools.profiler import profile
 
 
 class DensityVisco_d(DiscreteOperator):
     """
-
+    To be documented ...
     """
     @debug
     def __init__(self, density, viscosity,
-                 method=None, densityVal=None, viscoVal=None):
+                 densityVal=None, viscoVal=None, **kwds):
         """
-        Constructor.
-        Reconstruct the Density and the Viscosity scalars.
         @param operator.
         """
-        DiscreteOperator.__init__(self, [density, viscosity],
-                                  method, name="DensityVisco_d")
+        if 'variables' in kwds:
+            super(DensityVisco_d, self).__init__(**kwds)
+            self.density = self.variables[0]
+            self.viscosity = self.variables[1]
+        else:
+            super(DensityVisco_d, self).__init__(variables=[density,
+                                                            viscosity],
+                                                 **kwds)
+            self.density = density
+            self.viscosity = viscosity
 
-        self.density = density
-        self.viscosity = viscosity
         self.densityVal = densityVal
         self.viscoVal = viscoVal
         self.input = [self.density, self.viscosity]
         self.output = [self.density, self.viscosity]
 
+        # Note FP : what must be done if densityVal or viscoVal is None???
+
     @debug
-    @timed_function
+    @profile
     def apply(self, simulation=None):
-        if simulation is None:
-            raise ValueError("Missing simulation value for computation.")
-        # Calling for requirements completion
-        DiscreteOperator.apply(self, simulation)
+        assert simulation is not None, \
+            "Missing simulation value for computation."
 
-        dt = simulation.timeStep
         iCompute = self.density.topology.mesh.iCompute
 
         # Density reconstruction
-        if self.density[iCompute].all() <= np.absolute(self.densityVal[1] -
-                                                       self.densityVal[0]) / 2.0 :
-            self.density[iCompute] = self.densityVal[1]
-        else :
-            self.density[iCompute] = self.densityVal[0]
+        if self.density[0][iCompute].all() <= np.absolute(
+                self.densityVal[1] - self.densityVal[0]) / 2.0:
+            self.density[0][iCompute] = self.densityVal[1]
+        else:
+            self.density[0][iCompute] = self.densityVal[0]
 
         # Viscosity reconstruction :
         # nu = nu1 + (nu2 - nu1) * (density - rho1)/(rho2 - rho1)
-        self.viscosity.data = self.viscoVal[0] + \
-                              (self.viscoVal[1] - self.viscoVal[0]) * \
-                              ((self.density.data - self.densityVal[0]) / \
-                              (self.densityVal[1] - self.densityVal[0]))
-
+        self.viscosity.data[0] = self.viscoVal[0] + \
+            (self.viscoVal[1] - self.viscoVal[0]) * \
+            ((self.density.data[0] - self.densityVal[0]) /
+             (self.densityVal[1] - self.densityVal[0]))
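
The viscosity reconstruction above is a plain linear interpolation between
(rho1, nu1) and (rho2, nu2); a scalar check with made-up values:

    rho1, rho2 = 1.0, 10.0
    nu1, nu2 = 1e-3, 1e-2
    rho = 0.5 * (rho1 + rho2)   # halfway in density ...
    nu = nu1 + (nu2 - nu1) * (rho - rho1) / (rho2 - rho1)
    assert abs(nu - 0.5 * (nu1 + nu2)) < 1e-14   # ... halfway in viscosity
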
diff --git a/HySoP/hysop/operator/discrete/differential.py b/HySoP/hysop/operator/discrete/differential.py
index 0ca0def6704752bbbe69540c953704f0b4318921..f8dbc0d1ed727443e8f75a8d76db89c9e74551be 100644
--- a/HySoP/hysop/operator/discrete/differential.py
+++ b/HySoP/hysop/operator/discrete/differential.py
@@ -9,13 +9,14 @@ from parmepy.operator.discrete.discrete import DiscreteOperator
 from parmepy.numerics.differential_operations import Curl, GradV
 import parmepy.tools.numpywrappers as npw
 from abc import ABCMeta, abstractmethod
-from parmepy.numerics.updateGhosts import UpdateGhosts
+from parmepy.numerics.update_ghosts import UpdateGhosts
 from parmepy.methods_keys import SpaceDiscretisation
 try:
     from parmepy.f2py import fftw2py
 except ImportError:
     from parmepy.fakef2py import fftw2py
 import parmepy.default_methods as default
+from parmepy.tools.profiler import profile
 
 
 class Differential(DiscreteOperator):
@@ -26,35 +27,30 @@ class Differential(DiscreteOperator):
     ##     return object.__new__(cls, *args, **kw)
 
     @debug
-    def __init__(self, invar, outvar, method=None):
+    def __init__(self, invar, outvar, **kwds):
         """
         @param[in] invar : input field
         @param[in,out] outvar : Grad of the input field.
         Warning : must be a field with dim * invar.nbcomponents,
         dim being the domain dimension.
-        @param[in] method : how to compute the grad? Default = finite
-        differences, 4th order.
         """
         self.invar = invar
         self.outvar = outvar
-        if method is None:
-            method = default.DIFFERENTIAL
-        DiscreteOperator.__init__(self, [self.invar, self.outvar],
-                                  method=method)
+        if 'method' not in kwds:
+            kwds['method'] = default.DIFFERENTIAL
+        assert 'variables' not in kwds, 'variables parameter is useless.'
+        super(Differential, self).__init__(variables=[invar, outvar],
+                                           **kwds)
         self.input = [self.invar]
         self.output = [self.outvar]
-        self._dim = self.invar.topology.domain.dimension
         self._synchronize = None
-
-    @staticmethod
-    def getWorkLengths(method=None, domain_dim=None):
-        assert method is None
-        assert domain_dim is None
-        return 0.
+        # connexion to a numerical method
+        self._function = None
 
     @abstractmethod
     def apply(self, simulation=None):
         """
+        Abstract interface
         """
 
 
@@ -63,16 +59,36 @@ class CurlFFT(Differential):
     Compute the curl of a discrete field, using Fourier fftw
     """
 
+    def __init__(self, **kwds):
+        super(CurlFFT, self).__init__(**kwds)
+        if self.domain.dimension == 3:
+            self._apply = self._apply3D
+        elif self.domain.dimension == 2:
+            raise ValueError('Not yet implemented.')
+            #self._apply = self._apply2D
+
     def apply(self, simulation=None):
-        DiscreteOperator.apply(self, simulation)
-        ghosts_in = self.invar.topology.ghosts
-        ghosts_out = self.outvar.topology.ghosts
+        self._apply()
+
+    @debug
+    @profile
+    def _apply3D(self):
+        ghosts_in = self.invar.topology.ghosts()
+        ghosts_out = self.outvar.topology.ghosts()
         self.outvar.data[0], self.outvar.data[1], self.outvar.data[2] = \
             fftw2py.solve_curl_3d(self.invar.data[0], self.invar.data[1],
                                   self.invar.data[2], self.outvar.data[0],
                                   self.outvar.data[1], self.outvar.data[2],
                                   ghosts_in, ghosts_out)
 
+    def _apply2D(self):
+        ghosts_in = self.invar.topology.ghosts()
+        ghosts_out = self.outvar.topology.ghosts()
+        self.outvar.data[0], self.outvar.data[1], self.outvar.data[2] = \
+            fftw2py.solve_curl_2d(self.invar.data[0], self.invar.data[1],
+                                  self.outvar.data[0],
+                                  ghosts_in, ghosts_out)
+
     def finalize(self):
         """
         Clean memory (fftw plans and so on)
@@ -85,31 +101,29 @@ class CurlFD(Differential):
     Compute the curl of a discrete field, using finite differences.
     """
 
-    def setUp(self):
+    def __init__(self, **kwds):
+
+        super(CurlFD, self).__init__(**kwds)
+
         # prepare ghost points synchro for velocity
         self._synchronize = UpdateGhosts(self.invar.topology,
                                          self.invar.nbComponents)
+        self._function = Curl(self.invar.topology, self._rwork,
+                              self.method[SpaceDiscretisation])
 
-        worklength = self.getWorkLengths()
+    def _set_work_arrays(self, rwork, iwork):
+        worklength = Curl.getWorkLengths()
         memshape = self.invar.data[0].shape
-        if not self.hasExternalWork:
-            self._rwork = [npw.zeros(memshape) for i in xrange(worklength)]
+        if rwork is None:
+            self._rwork = [npw.zeros(memshape) for _ in xrange(worklength)]
         else:
+            # connect to the user-provided work arrays before checking them
+            # (same pattern as AdaptTimeStep_D._set_work_arrays)
+            self._rwork = rwork
             assert len(self._rwork) == worklength
             for wk in self._rwork:
                 assert wk.shape == memshape
-        assert self.outvar.nbComponents == self.invar.nbComponents
-
-        self._function = Curl(self.invar.topology, self._rwork,
-                              self.method[SpaceDiscretisation])
-
-    @staticmethod
-    def getWorkLengths(method=None, domain_dim=None):
-        return Curl.getWorkLengths()
 
+    @debug
+    @profile
     def apply(self, simulation=None):
-        # Calling for requirements completion
-        DiscreteOperator.apply(self, simulation)
         self._synchronize(self.invar.data)
         self.outvar.data = self._function(self.invar.data, self.outvar.data)
 
@@ -119,22 +133,19 @@ class GradFD(Differential):
     Compute the grad of a discrete field, using finite differences.
     """
 
-    def setUp(self):
+    def __init__(self, **kwds):
+
+        super(GradFD, self).__init__(**kwds)
         # prepare ghost points synchro for velocity
         self._synchronize = UpdateGhosts(self.invar.topology,
                                          self.invar.nbComponents)
-        assert self.outvar.nbComponents == self._dim * self.invar.nbComponents
+        dim = self.domain.dimension
+        assert self.outvar.nbComponents == dim * self.invar.nbComponents
         self._function = GradV(self.invar.topology,
                                self.method[SpaceDiscretisation])
 
-    def curlFD(self, simulation=None):
-        # Calling for requirements completion
-        DiscreteOperator.apply(self, simulation)
-        self._synchronize(self.invar.data)
-        self.outvar.data = self._function(self.invar.data, self.outvar.data)
-
+    @debug
+    @profile
     def apply(self, simulation=None):
-        # Calling for requirements completion
-        DiscreteOperator.apply(self, simulation)
         self._synchronize(self.invar.data)
         self.outvar.data = self._function(self.invar.data, self.outvar.data)
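
Since work allocation now happens in __init__ through _set_work_arrays,
scratch arrays can be allocated once and handed to the constructor. A sketch,
assuming discrete fields velo_d and vorti_d on the same topology (Curl here
is parmepy.numerics.differential_operations.Curl, as imported in this file):

    import parmepy.tools.numpywrappers as npw

    nb = Curl.getWorkLengths()        # number of scratch arrays required
    shape = velo_d.data[0].shape
    rwork = [npw.zeros(shape) for _ in xrange(nb)]
    curl_fd = CurlFD(invar=velo_d, outvar=vorti_d, rwork=rwork)
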
diff --git a/HySoP/hysop/operator/discrete/diffusion_fft.py b/HySoP/hysop/operator/discrete/diffusion_fft.py
index 99c0cbb1bc88936960d7ca3eb27a4f53a886c604..1e84701d0e2585c658a4ac15d9d29c51b5cbad95 100644
--- a/HySoP/hysop/operator/discrete/diffusion_fft.py
+++ b/HySoP/hysop/operator/discrete/diffusion_fft.py
@@ -10,6 +10,7 @@ except ImportError:
 from parmepy.operator.discrete.discrete import DiscreteOperator
 from parmepy.constants import debug
 from parmepy.mpi import MPI
+from parmepy.tools.profiler import profile
 
 
 class DiffusionFFT(DiscreteOperator):
@@ -19,7 +20,7 @@ class DiffusionFFT(DiscreteOperator):
 
     """
     @debug
-    def __init__(self, vorticity, viscosity, method=None):
+    def __init__(self, vorticity, viscosity, **kwds):
         """
         Constructor.
         @param[in,out] vorticity :  discretisation of the field \f$ \omega \f$.
@@ -31,29 +32,27 @@ class DiffusionFFT(DiscreteOperator):
         self.viscosity = viscosity
 
         if self.vorticity.dimension == 1:
-            raise AttributeError("Wrong problem dimension: only 2D \
-                and 3D cases are implemented.")
+            raise AttributeError("1D case not yet implemented.")
         # Base class initialisation
-        DiscreteOperator.__init__(self, [self.vorticity], method)
+        assert 'variables' not in kwds, 'variables parameter is useless.'
+        super(DiffusionFFT, self).__init__(variables=[vorticity],
+                                           **kwds)
         self.input = [self.vorticity]
         self.output = [self.vorticity]
 
     @debug
+    @profile
     def apply(self, simulation=None):
-        if simulation is None:
-            raise ValueError("Missing dt value for diffusion computation.")
-        # Calling for requirements completion
-        DiscreteOperator.apply(self, simulation)
-        ctime = MPI.Wtime()
-
+        assert simulation is not None, \
+            "Missing dt value for diffusion computation."
         dt = simulation.timeStep
-        ghosts = self.vorticity.topology.ghosts
+        ghosts = self.vorticity.topology.ghosts()
 
-        if (self.vorticity.dimension == 2):
+        if self.vorticity.dimension == 2:
             self.vorticity.data = fftw2py.solve_diffusion_2d(
                 self.viscosity * dt, self.vorticity.data)
 
-        elif (self.vorticity.dimension == 3):
+        elif self.vorticity.dimension == 3:
             self.vorticity.data[0], self.vorticity.data[1],\
                 self.vorticity.data[2] = \
                 fftw2py.solve_diffusion_3d(self.viscosity * dt,
@@ -64,10 +63,12 @@ class DiffusionFFT(DiscreteOperator):
 
         else:
             raise ValueError("invalid problem dimension")
-        self._apply_timer.append_time(MPI.Wtime() - ctime)
 
     def finalize(self):
         """
         Clean memory (fftw plans and so on)
         """
-        fftw2py.clean_fftw_solver(self.vorticity.dimension)
+        pass
+        # TODO : fix the bug that occurs when finalize is called
+        # on several fft operators.
+        # fftw2py.clean_fftw_solver(self.vorticity.dimension)
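
fftw2py.solve_diffusion_* lives on the Fortran side and is not shown in this
patch; a 1D numpy analogue of the implicit spectral step it is assumed to
perform for d(omega)/dt = nu * Laplacian(omega):

    import numpy as np

    n, length, nu, dt = 64, 2. * np.pi, 1e-2, 0.1
    x = np.linspace(0., length, n, endpoint=False)
    omega = np.sin(3 * x)
    k = 2. * np.pi * np.fft.fftfreq(n, d=length / n)         # wave numbers
    omega_hat = np.fft.fft(omega) / (1. + nu * dt * k ** 2)  # implicit Euler
    omega = np.real(np.fft.ifft(omega_hat))
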
diff --git a/HySoP/hysop/operator/discrete/discrete.py b/HySoP/hysop/operator/discrete/discrete.py
index a3d8fa7d56f1c940f63a6fa5011f3b531d4bfe9a..fc3b03de6c06d6560d9b67d053eb7cf6169df9f8 100644
--- a/HySoP/hysop/operator/discrete/discrete.py
+++ b/HySoP/hysop/operator/discrete/discrete.py
@@ -4,8 +4,8 @@ Abstract interface for discrete operators.
 """
 from abc import ABCMeta, abstractmethod
 from parmepy.constants import debug
-from parmepy.tools.timers import Timer, ManualFunctionTimer
 from parmepy.methods_keys import GhostUpdate
+from parmepy.tools.profiler import Profiler
 
 
 class DiscreteOperator(object):
@@ -24,15 +24,27 @@ class DiscreteOperator(object):
 
     @debug
     @abstractmethod
-    def __init__(self, variables, method=None):
+    def __init__(self, variables, rwork=None, iwork=None, method=None,
+                 mpi_params=None):
         """
-        Create an empty discrete operator.
+        Abstract base class for discrete operators.
+        @param variables : a list of discrete fields
+        (parmepy.fields.discrete.Discrete)
+        @param rwork : a list of work arrays of reals.
+        if None, local work arrays will be allocated.
+        @param iwork : a list of work arrays of int.
+        if None, local work arrays will be allocated.
+        @param method : parameters of the discretization
         """
         if isinstance(variables, list):
             ## variables
             self.variables = variables
         else:
             self.variables = [variables]
+
+        self.domain = self.variables[0].domain
+        self._dim = self.domain.dimension
+
         ## Input variables
         self.input = []
         ## Output variables
@@ -41,69 +53,61 @@ class DiscreteOperator(object):
         if method is None:
             method = {}
         self.method = method
-        if not GhostUpdate in method:
+        if GhostUpdate not in method:
             method[GhostUpdate] = True
+        ## Operator name
         self.name = self.__class__.__name__
         ## Object to store computational times of lower level functions
-        self.timer = Timer(self)
-        ## bool to check if the setup function has been called for
-        ## this operator
-        self._isUpToDate = False
-        ## True if work arrays are provided by external call (self.setWorks)
-        self.hasExternalWork = False
-        self._apply_timer = ManualFunctionTimer('apply_function')
-        self.timer.addFunctionTimer(self._apply_timer)
-        # Local (optional) work arrays. Set with setWorks function
+        self.profiler = Profiler(self, self.domain.comm_task)
+
+        # Allocate or check work arrays.
+        # Their shapes and number strongly depend
+        # on the type of discrete operator.
+        # A _set_work_arrays function must be implemented
+        # in every derived class where work arrays are required.
         self._rwork = None
         self._iwork = None
+        self._set_work_arrays(rwork, iwork)
+
+        # Function to synchronize ghosts if needed
+        self._synchronize = None
+
+        # Object that deals with output file writing.
+        # Optional.
+        self._writer = None
+        # Check consistency of the variables' topologies
+        if self.variables is not None:
+            topoRef = self.variables[0].topology
+            for v in self.variables:
+                assert v.topology.isConsistentWith(topoRef)
 
-    @staticmethod
-    def getWorkLengths(nb_components=None, domain_dim=None):
+    def get_work_properties(self):
         """
-        Compute the number of required work arrays for this method.
-        @param nb_components : number of components of the
-        @param domain_dim : dimension of the domain
-        fields on which this method operates.
-        @return length of list of work arrays of reals.
-        @return length of list of work arrays of int.
+        Return the shapes of the work arrays required
+        by the discrete operator.
+        @return shapes, a dict describing the arrays:
+        shapes['rwork'] == list of shapes for real arrays,
+        shapes['iwork'] == list of shapes for int arrays.
+        len(shapes['...']) gives the number of required arrays.
         """
-        assert nb_components is None
-        assert domain_dim is None
-        return 0, 0
-
-    def setWorks(self, rwork=None, iwork=None):
-
-        # Set work arrays for real and int.
-        # Warning : no copy! We must have pointer
-        # links between work and self.work arrays.
-        if rwork is None:
-            rwork = []
-        if iwork is None:
-            iwork = []
-        self._rwork = rwork
-        self._iwork = iwork
-        self.hasExternalWork = True
-        # Warning : we do not check work lengths/size.
-        # This will be done during discreteOperator.setUp.
-
-    def setUp(self):
+        return {'rwork': None, 'iwork': None}
+
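+    # A minimal sketch of the intended work-array protocol (MyOp is a
+    # hypothetical derived class implementing get_work_properties and
+    # _set_work_arrays; npw = parmepy.tools.numpywrappers):
+    #
+    #   op = MyOp(variables)                # allocates local work arrays
+    #   props = op.get_work_properties()
+    #   rwork = [npw.zeros(shape) for shape in props['rwork']]
+    #   op2 = MyOp(variables, rwork=rwork)  # op2 uses the external buffers
+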
+    def _set_work_arrays(self, rwork, iwork):
         """
-        Update the operator --> after calling this function,
-        the operator is ready to use.
-        Main steps are:
-        - allocate (if required) work arrays
-        - check work arrays number and shapes
-        - create instances of numerical methods required by
-        this operator
+        @param rwork : None or a list of numpy real arrays.
+        @param iwork : None or a list of numpy int arrays.
+        If a work parameter is None, local work arrays are allocated.
+        Else self._rwork and self._iwork are connected to rwork and iwork.
+        If this function is not overloaded in a derived class,
+        it means that no work arrays are required.
         """
-        self._isUpToDate = True
+        pass
 
-    def isUp(self):
+    def setWriter(self, writer):
         """
-        True if ready to be applied (--> setup function has
-        been called succesfully)
+        Assign a writer to the current operator
         """
-        return self._isUpToDate
+        self._writer = writer
 
     @debug
     @abstractmethod
@@ -114,7 +118,6 @@ class DiscreteOperator(object):
         parameters (time, time step, iteration number ...), see
         parmepy.problem.simulation.Simulation for details.
         """
-        pass
 
     @debug
     def finalize(self):
@@ -137,7 +140,7 @@ class DiscreteOperator(object):
                 s += str(f) + "\n"
         return s
 
-    def updateGhosts(self):
+    def update_ghosts(self):
         """
         Update ghost points values, if any.
         This function must be implemented in the discrete
@@ -147,3 +150,7 @@ class DiscreteOperator(object):
         operator apply.
         """
         pass
+
+    def get_profiling_info(self):
+        """Get the manual profiler informations into the default profiler"""
+        pass
diff --git a/HySoP/hysop/operator/discrete/discrete.pyc b/HySoP/hysop/operator/discrete/discrete.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..69848e90f644fe35eb10b9db269ffab49a8fbcd5
Binary files /dev/null and b/HySoP/hysop/operator/discrete/discrete.pyc differ
diff --git a/HySoP/hysop/operator/discrete/energy_enstrophy.py b/HySoP/hysop/operator/discrete/energy_enstrophy.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a88e985f3536f1d3dbea3c04c81c03e0c0cefee
--- /dev/null
+++ b/HySoP/hysop/operator/discrete/energy_enstrophy.py
@@ -0,0 +1,140 @@
+# -*- coding: utf-8 -*-
+"""
+@file energy_enstrophy.py
+Compute Energy and Enstrophy
+"""
+from parmepy.constants import debug
+from parmepy.tools.timers import timed_function
+import parmepy.tools.numpywrappers as npw
+from parmepy.operator.discrete.discrete import DiscreteOperator
+
+
+class EnergyEnstrophy(DiscreteOperator):
+    """
+    Discretization of the energy/enstrophy computation process.
+    """
+    def __init__(self, velocity, vorticity, is_normalized=True, **kwds):
+        """
+        Constructor.
+        @param velocity : discretization of the velocity field
+        @param vorticity : discretization of the vorticity field
+        @param is_normalized : if True, energy and enstrophy are
+        normalized by the domain volume
+        """
+        ## velocity field
+        self.velocity = velocity
+        ## vorticity field
+        self.vorticity = vorticity
+
+        assert 'variables' not in kwds, 'variables parameter is set internally.'
+        super(EnergyEnstrophy, self).__init__(variables=[velocity, vorticity],
+                                              **kwds)
+        ## Coeffs for integration
+        self.coeff = {}
+        ## Global energy
+        self.energy = 0.0
+        ## Global enstrophy
+        self.enstrophy = 0.0
+        topo_w = self.vorticity.topology
+        topo_v = self.velocity.topology
+        space_step = topo_w.mesh.space_step
+        length = topo_w.domain.length
+        # Remark: topo_w.domain and topo_v.domain must be the same,
+        # so the length is taken from topo_w only.
+        self.coeff['Enstrophy'] = npw.prod(space_step)
+        space_step = topo_v.mesh.space_step
+        self.coeff['Energy'] = 0.5 * npw.prod(space_step)
+        if is_normalized:
+            normalization = 1. / npw.prod(length)
+            self.coeff['Enstrophy'] *= normalization
+            self.coeff['Energy'] *= normalization
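+        # With these coefficients the discrete quantities read
+        # (uniform grid step dx, domain size L, sums over components
+        # and grid points; drop the 1/prod(L) if not normalized):
+        #   energy    = 0.5 * prod(dx) / prod(L) * sum(v ** 2)
+        #   enstrophy = prod(dx) / prod(L) * sum(w ** 2)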
+
+    def _set_work_arrays(self, rwork=None, iwork=None):
+
+        v_ind = self.velocity.topology.mesh.iCompute
+        w_ind = self.vorticity.topology.mesh.iCompute
+        shape_v = self.velocity.data[0][v_ind].shape
+        shape_w = self.vorticity.data[0][w_ind].shape
+        # setup for rwork, iwork is useless.
+        if rwork is None:
+            # ---  Local allocation ---
+            if shape_v == shape_w:
+                self._rwork = [npw.zeros(shape_v)]
+            else:
+                self._rwork = [npw.zeros(shape_v), npw.zeros(shape_w)]
+        else:
+            assert isinstance(rwork, list), 'rwork must be a list.'
+            # --- External rwork ---
+            self._rwork = rwork
+            if shape_v == shape_w:
+                assert len(self._rwork) == 1
+                assert self._rwork[0].shape == shape_v
+            else:
+                assert len(self._rwork) == 2
+                assert self._rwork[0].shape == shape_v
+                assert self._rwork[1].shape == shape_w
+
+    def get_work_properties(self):
+
+        v_ind = self.velocity.topology.mesh.iCompute
+        w_ind = self.vorticity.topology.mesh.iCompute
+        shape_v = self.velocity.data[0][v_ind].shape
+        shape_w = self.vorticity.data[0][w_ind].shape
+        if shape_v == shape_w:
+            return {'rwork': [shape_v], 'iwork': None}
+        else:
+            return {'rwork': [shape_v, shape_w], 'iwork': None}
+
+    @debug
+    @timed_function
+    def apply(self, simulation=None):
+        if simulation is None:
+            raise ValueError("Missing simulation value for computation.")
+
+        # --- Kinetic energy computation ---
+        vd = self.velocity
+        # get the list of computation points (no ghosts)
+        nbc = vd.nbComponents
+        v_ind = self.velocity.topology.mesh.iCompute
+        # Integrate (locally) velocity ** 2
+        local_energy = 0.
+        for i in xrange(nbc):
+            self._rwork[0][...] = vd[i][v_ind] ** 2
+            local_energy += npw.real_sum(self._rwork[0])
+
+        # --- Enstrophy computation ---
+        vortd = self.vorticity
+        nbc = vortd.nbComponents
+        w_ind = self.vorticity.topology.mesh.iCompute
+        # Integrate (locally) vorticity ** 2
+        work = self._rwork[-1]
+        local_enstrophy = 0.
+        for i in xrange(nbc):
+            work[...] = vortd[i][w_ind] ** 2
+            local_enstrophy += npw.real_sum(work)
+
+        # --- Reduce energy and enstrophy values over all processes ---
+        # Two ways: numpy or classical. TODO: check performance and comms.
+        sendbuff = npw.zeros((2))
+        recvbuff = npw.zeros((2))
+        sendbuff[:] = [local_energy, local_enstrophy]
+        #
+        self.velocity.topology.comm.Allreduce(sendbuff, recvbuff)
+        # the other way :
+        #energy = self.velocity.topology.allreduce(local_energy,
+        #                                          PARMES_MPI_REAL,
+        #                                          op=MPI.SUM)
+        #enstrophy = self.velocity.topology.allreduce(local_enstrophy,
+        #                                             PARMES_MPI_REAL,
+        #                                             op=MPI.SUM)
+
+        # Update global values
+        self.energy = recvbuff[0] * self.coeff['Energy']
+        self.enstrophy = recvbuff[1] * self.coeff['Enstrophy']
+
+        # Print results, if required
+        ite = simulation.currentIteration
+        if self._writer is not None and self._writer.do_write(ite):
+            self._writer.buffer[0, 0] = simulation.time
+            self._writer.buffer[0, 1] = self.energy
+            self._writer.buffer[0, 2] = self.enstrophy
+            self._writer.write()
diff --git a/HySoP/hysop/operator/discrete/particle_advection.py b/HySoP/hysop/operator/discrete/particle_advection.py
index 050f16fb07d6040d543e9cd95cf024d62ecf5680..0d63860c8e19fced0c9ccfb6e9479248ee20eeeb 100644
--- a/HySoP/hysop/operator/discrete/particle_advection.py
+++ b/HySoP/hysop/operator/discrete/particle_advection.py
@@ -4,13 +4,14 @@
 Advection solver, particular method, pure-python version.
 
 """
-from parmepy.constants import debug, WITH_GUESS, PARMES_INDEX
-from parmepy.methods_keys import TimeIntegrator, Interpolation, Remesh
+from parmepy.constants import debug, WITH_GUESS, PARMES_REAL, PARMES_DIM
+from parmepy.methods_keys import TimeIntegrator, Interpolation, Remesh, Support
 from parmepy.operator.discrete.discrete import DiscreteOperator
-from parmepy.fields.continuous import Field
 import parmepy.tools.numpywrappers as npw
-from parmepy.mpi import MPI
 import parmepy.default_methods as default
+import numpy as np
+from parmepy.numerics.remeshing import Remeshing
+from parmepy.tools.profiler import profile
 
 
 class ParticleAdvection(DiscreteOperator):
@@ -19,195 +20,182 @@ class ParticleAdvection(DiscreteOperator):
     """
 
     @debug
-    def __init__(self, velocity, advectedFields, d,
-                 part_position=None, part_advectedFields=None,
-                 method=None,
-                 isMultiScale=False):
+    def __init__(self, velocity, fields_on_grid, direction, **kwds):
         """
         Constructor.
-        @param velocity discrete field
-        @param advectedFields : list of discrete fields to be advected
-        @param d : Direction to advect
-        @param method : Method used
-          - Integration methods:
-            - 'rk2' : Runge Kutta 2nd order advection
-            - 'rk4' : Runge Kutta 4th order advection
-          - remeshing formula:
-            - 'm4prime' : = 'l2_1'
-            - 'l2_1' : Labmda2,1 : (=M'4) 4 point formula, C1 regularity
-            - 'l2_2' : Labmda2,2 : 4 point formula, C2 regularity
-            - 'm6prime' : = 'l4_2'
-            - 'l4_2' : Labmda4,2 : (=M'6) 6 point formula, C2 regularity
-            - 'l4_3' : Labmda4,3 : 6 point formula, C3 regularity
-            - 'l4_4' : Labmda4,4 : 6 point formula, C4 regularity
-            - 'l6_3' : Labmda6,3 : 8 point formula, C3 regularity
-            - 'l6_4' : Labmda6,4 : 8 point formula, C4 regularity
-            - 'l6_5' : Labmda6,5 : 8 point formula, C5 regularity
-            - 'l6_6' : Labmda6,6 : 8 point formula, C6 regularity
-            - 'l8_4' : Labmda8,4 : 10 point formula, C4 regularity
-            - 'm8prime' : M8prime formula
+        @param velocity: discretization of the velocity field
+        @param fields_on_grid : list of discretized fields to be advected.
+        @param direction : direction of advection
         """
         ## Advection velocity
         self.velocity = velocity
+
+        # set variables list ...
         variables = [self.velocity]
-        if not isinstance(advectedFields, list):
-            self.advectedFields = [advectedFields]
+        if not isinstance(fields_on_grid, list):
+            self.fields_on_grid = [fields_on_grid]
         else:
-            self.advectedFields = advectedFields
-        # update the list of discrete variables for this operator
-        [variables.append(advecF) for advecF in self.advectedFields]
-        if method is None:
-            method = default.ADVECTION
-        DiscreteOperator.__init__(self, variables, method)
+            self.fields_on_grid = fields_on_grid
+        for f in self.fields_on_grid:
+            variables.append(f)
+
+        if 'method' not in kwds:
+            kwds['method'] = default.ADVECTION
+
+        assert 'variables' not in kwds, 'variables parameter is set internally.'
+        super(ParticleAdvection, self).__init__(variables=variables, **kwds)
 
         self.input = self.variables
-        self.output = [df for df in self.variables if not df is self.velocity]
-        self.dir = d
-        self.part_position = part_position
-        self.part_advectedFields = part_advectedFields
-        self._dim = self.velocity.dimension
-        self._workspace, self._iworkspace = [], []
-        self._isMultiScale = isMultiScale
-
-    @staticmethod
-    def getWorkLengths(method=None, domain_dim=None):
+        self.output = [df for df in self.variables if df is not self.velocity]
+        self.direction = direction
+
+        self._configure_numerical_methods()
+
+    def _configure_numerical_methods(self):
         """
-        Return the length of working arrays lists required
-        for advction discrete operator, depending on :
-        - the time integrator (RK2, ...)
-        - the interpolation (which depends on domain dimension)
-        - the remeshing (which depends on domain dimension)
-        @param method : the dict of parameters for the operator.
-        Default = parmepy.default_methods.ADVECTION
+        Set the numerical methods used by this operator and link them
+        to the proper work arrays.
         """
-        if method is None:
-            method = default.ADVECTION
-        assert Interpolation in method,\
-            'An interpolation is required for the advection method.'
-        assert TimeIntegrator in method,\
-            'A time integrator is required for the advection method.'
-        assert Remesh in method,\
-            'A remesh is required for the advection method.'
-        tw = method[TimeIntegrator].getWorkLengths(1)
-        iw, iiw = method[Interpolation].getWorkLengths(domain_dim=domain_dim)
-        rw, riw = method[Remesh].getWorkLengths(domain_dim=domain_dim)
-        return max(tw + iw, rw), max(iiw, riw)
-
-    @debug
-    def setUp(self):
-        ## Result position
-        if self.part_position is None:
-            self.part_position = \
-                Field(self.advectedFields[0].topology.domain,
-                      "Particle_Position",
-                      isVector=False
-                      ).discretize(self.advectedFields[0].topology)
-        ## Result advectedFields
-        if self.part_advectedFields is None:
-            self.part_advectedFields = [
-                Field(adF.topology.domain, "Particle_AdvectedFields",
-                      isVector=adF.isVector).discretize(adF.topology)
-                for adF in self.advectedFields]
-
-        RKn = self.method[TimeIntegrator]
-        Interpol = self.method[Interpolation]
-        Rmsh = self.method[Remesh]
-        memshape = self.part_position.data[0].shape
+        # Use first field topology as reference
+        topo = self.fields_on_grid[0].topology
+
+        # --- Initialize time integrator for advection ---
+
+        w_interp, iw_interp =\
+            self.method[Interpolation].getWorkLengths(
+                domain_dim=self.domain.dimension)
+        self._rw_interp = self._rwork[:w_interp]
+        self._iw_interp = self._iwork[:iw_interp]
+
+        vd = self.velocity.data[self.direction]
+        num_interpolate = \
+            self.method[Interpolation](vd, self.direction, topo,
+                                       work=self._rw_interp,
+                                       iwork=self._iw_interp)
+
+        w_rk = self.method[TimeIntegrator].getWorkLengths(nb_components=1)
+        self._rw_integ = self._rwork[w_interp:w_interp + w_rk]
+        self.num_advec = self.method[TimeIntegrator](1, work=self._rw_integ,
+                                                     f=num_interpolate,
+                                                     topo=topo,
+                                                     optim=WITH_GUESS)
+        # --- Initialize remesh ---
+        w_remesh, iw_remesh =\
+            Remeshing.getWorkLengths(
+                domain_dim=self.domain.dimension)
+        self._rw_remesh = self._rwork[:w_remesh]
+        self._iw_remesh = self._iwork[:iw_remesh]
+
+        self.num_remesh = Remeshing(self.method[Remesh],
+                                    self.domain.dimension,
+                                    topo, self.direction,
+                                    work=self._rw_remesh,
+                                    iwork=self._iw_remesh)
+
+        ## Particles positions
+        start = max(w_interp + w_rk, w_remesh)
+        self.part_position = [self._rwork[start]]
+
+        ## Fields on particles
+        self.fields_on_part = {}
+        start += 1
+        for f in self.fields_on_grid:
+            self.fields_on_part[f] = self._rwork[start: start + f.nbComponents]
+            start += f.nbComponents
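+
+        # Resulting layout of self._rwork (interpolation and remeshing
+        # slices overlap on purpose: they never run at the same time):
+        #   [0 : w_interp]               -> interpolation work
+        #   [w_interp : w_interp + w_rk] -> time-integrator work
+        #   [0 : w_remesh]               -> remeshing work (reuses the front)
+        #   [start0]                     -> particle positions, with
+        #                                   start0 = max(w_interp + w_rk, w_remesh)
+        #   [start0 + 1 : ...]           -> fields on particles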
+
+    def _set_work_arrays(self, rwork=None, iwork=None):
         # Get work lengths
-        w, iw = self.getWorkLengths(self.method, self._dim)
-        w_rk = RKn.getWorkLengths(nb_components=1)
-        w_interp, iw_interp = Interpol.getWorkLengths(domain_dim=self._dim)
-        w_remesh, iw_remesh = Rmsh.getWorkLengths(domain_dim=self._dim)
-        # Allocate local works
-        if not self.hasExternalWork:
-            for i in xrange(w):
-                self._workspace.append(
-                    npw.zeros_like(self.part_position.data[0]))
-            for i in xrange(iw):
-                self._iworkspace.append(
-                    npw.zeros(memshape, dtype=PARMES_INDEX))
+        dimension = self.domain.dimension
+        if self.method[Support].find('gpu') < 0:
+            tiw = self.method[TimeIntegrator].getWorkLengths(1)
+            iw, iiw = self.method[Interpolation].getWorkLengths(
+                domain_dim=dimension)
+            rw, riw = Remeshing.getWorkLengths(domain_dim=dimension)
+            iwl = max(iiw, riw)
+            rw = max(tiw + iw, rw)
         else:
-            # Check external works lengths
-            assert len(self._workspace) == w
-            assert len(self._iworkspace) == iw
-            for wk in self._workspace:
-                assert wk.shape == tuple(memshape)
-            for wk in self._iworkspace:
-                assert wk.shape == tuple(memshape)
-
-        # Distribute work arrays
-        self._interpol_w = self._workspace[:w_interp]
-        self._interpol_iw = self._iworkspace[:iw_interp]
-        self._integrator_w = self._workspace[w_interp:w_interp + w_rk]
-        self._remesh_w = self._workspace[:w_remesh]
-        self._remesh_iw = self._iworkspace[:iw_remesh]
-
-        self.num_interpolate = \
-            Interpol(self.velocity, self.dir,
-                     self.advectedFields[0].topology,
-                     work=self._interpol_w,
-                     iwork=self._interpol_iw)
-        self.num_advec = RKn(1, work=self._integrator_w,
-                             f=self.num_interpolate,
-                             topo=self.velocity.topology, optim=WITH_GUESS)
-        self.num_remesh = Rmsh(
-            self.velocity.dimension,
-            self.advectedFields[0].topology,
-            self.dir, work=self._remesh_w,
-            iwork=self._remesh_iw)
-
-        self._isUpToDate = True
+            # For the GPU version, no numerics work arrays are needed
+            iwl, rw = 0, 0
+        # The reference shape comes from the advected fields, not velocity
+        fd = self.fields_on_grid[0]
+        memshape = fd.topology.mesh.resolution
+        rw += np.sum([f.nbComponents for f in self.fields_on_grid])
+        if self.method[Support].find('gpu') < 0 or \
+           self.method[Support].find('gpu_2k') >= 0:
+            rw += 1  # work array for positions
+
+        if rwork is None:
+            self._rwork = []
+            for i in xrange(rw):
+                self._rwork.append(npw.zeros(memshape))
+        else:
+            assert len(rwork) == rw
+            try:
+                for wk in rwork:
+                    assert wk.shape == tuple(memshape)
+            except AttributeError:
+                # Work array has been replaced by an OpenCL Buffer
+                # Testing the buffer size instead of shape
+                for wk in rwork:
+                    s = wk.size / np.prod(memshape)
+                    assert (PARMES_REAL is np.float32 and s == 4) or \
+                        (PARMES_REAL is np.float64 and s == 8)
+            self._rwork = rwork
+
+        if iwork is None:
+            self._iwork = []
+            for i in xrange(iwl):
+                self._iwork.append(npw.dim_zeros(memshape))
+        else:
+            assert len(iwork) == iwl
+            try:
+                for wk in iwork:
+                    assert wk.shape == tuple(memshape)
+            except AttributeError:
+                # Work array has been replaced by an OpenCL Buffer
+                # Testing the buffer size instead of shape
+                for wk in iwork:
+                    s = wk.size / np.prod(memshape)
+                    assert (PARMES_DIM is np.int16 and s == 2) or \
+                        (PARMES_DIM is np.int32 and s == 4) or \
+                        (PARMES_DIM is np.int64 and s == 8)
+            self._iwork = iwork
 
     @debug
-    def apply(self, simulation, dtCoeff, split_id, old_dir=None):
+    @profile
+    def apply(self, simulation=None, dt_coeff=1., split_id=0, old_dir=0):
         """
-        Apply advection operator.
-
-        @param t : current time.
-        @param dt : time step.
-        @param d : Direction of splitting.
-
         Advection algorithm:
-        @li 1. Particle initialization : \n
-                 - by copy advectedFields from grid to particles
-                 if previous splitting direction equals current
-                 splitting direction.\n
-                 - by transposition of advectedFields from grid to particle.
-        @li 2. Particle advection :\n
-                 - compute particle position in splitting direction as a
-                 advectedFields. Performs a RK2 resolution of dx_p/dt = a_p.
+        - initialize particles and fields with their values on the grid,
+        - compute particle positions in the splitting direction:
+        the time integrator solves dx_p/dt = a_p,
+        - remesh fields from particles back to the grid.
         """
-        # Calling for requirements completion
-        DiscreteOperator.apply(self, simulation)
-        ctime = MPI.Wtime()
+        assert simulation is not None, \
+            'Simulation parameter is missing.'
 
-        t, dt = simulation.time, simulation.timeStep * dtCoeff
-        # copy for particle advected initialization
-        for p_adF, adF in zip(self.part_advectedFields, self.advectedFields):
-            for dim in xrange(adF.nbComponents):
-                p_adF[dim][...] = adF[dim][...]
+        t, dt = simulation.time, simulation.timeStep * dt_coeff
+        # Initialize fields on particles with fields on grid values.
+        for fg in self.fields_on_grid:
+            for d in xrange(fg.nbComponents):
+                self.fields_on_part[fg][d][...] = fg[d][...]
 
-        # Get particle initial position (grid points)
-        self.part_position[0][...] = \
-            self.part_position.topology.mesh.coords[self.dir]
+        # Initialize particles on the grid
+        toporef = self.fields_on_grid[0].topology
+        self.part_position[0][...] = toporef.mesh.coords[self.direction]
 
         # Advect particles
         # The RK scheme uses the first 2 (or 3) work arrays and leaves
         # the others to interpolation.
         # The first work array holds the first evaluation of the ODE
         # right-hand side.
-        self._integrator_w[0][...] = self.velocity.data[self.dir][...]
-        self.part_position.data = self.num_advec(
-            t, self.part_position.data, dt, result=self.part_position.data)
+        self._rw_integ[0][...] = self.velocity.data[self.direction][...]
+        self.part_position = self.num_advec(
+            t, self.part_position, dt, result=self.part_position)
 
         # Remesh particles
         # It uses the last dim + 2 workspaces (same as interpolation)
-        for adF, p_adF in zip(self.advectedFields,
-                              self.part_advectedFields):
-            for dim in xrange(adF.nbComponents):
-                adF[dim][...] = self.num_remesh(
-                    self.part_position.data[0], p_adF.data[dim],
-                    result=adF[dim])
-        self._apply_timer.append_time(MPI.Wtime() - ctime)
-
-    @debug
-    def finalize(self):
-        pass
+        for fg in self.fields_on_grid:
+            fp = self.fields_on_part[fg]
+            for d in xrange(fg.nbComponents):
+                fg[d][...] = self.num_remesh(
+                    self.part_position, fp[d], result=fg[d])
diff --git a/HySoP/hysop/operator/discrete/penalization.py b/HySoP/hysop/operator/discrete/penalization.py
index dbc5e9f5d0e88e6ebf6180d4711ffe83b5d6160d..5746eb9750ad87a970b907aa1142606291ab2867 100644
--- a/HySoP/hysop/operator/discrete/penalization.py
+++ b/HySoP/hysop/operator/discrete/penalization.py
@@ -4,67 +4,116 @@
 Discrete operator for penalization problem.
 """
 from parmepy.constants import debug
-from discrete import DiscreteOperator
-from parmepy.tools.timers import timed_function
-import numpy as np
+from parmepy.operator.discrete.discrete import DiscreteOperator
+from parmepy.tools.profiler import profile
+from parmepy.domain.subsets.subset import Subset
 
 
-class Penalization_d(DiscreteOperator):
+class Penalization(DiscreteOperator):
     """
     Discretized penalisation operator.
     See details in parmepy.operator.penalization
     """
 
     @debug
-    def __init__(self, variables, obstacles, factor):
+    def __init__(self, obstacles, coeff, **kwds):
         """
-        Constructor.
-        @param[in,out] variables : list of discrete fields to be penalized
-        @param[in] obstacle : physical domain in which the penalization
-        is applied.
-        @param[in] factor : penalization factoricient
+        @param[in] obstacles : dictionary or list of
+        subsets on which penalization must be applied
+        @param[in] coeff : penalization factor or function.
+
+        Set either:
+        - obstacles = {obs1: coeff1, obs2: coeff2, ...} and coeff=None
+        to apply a different coefficient on each subset, or
+        - obstacles = [obs1, obs2, ...] and coeff=coeff1 to apply the
+        same penalization on all subsets.
+        obs1, obs2, ... must be parmepy.domain.subsets.Subset objects
+        and coeff1 must be either a scalar or a function of the
+        coordinates, like
+        def coeff(*args):
+            return 3 * args[0]
+
+        with args[0, 1, ...] = x, y, ...
         """
-        DiscreteOperator.__init__(self, variables)
+        super(Penalization, self).__init__(**kwds)
 
-        ## Penalization parameter
-        self.factor = np.asarray(factor)
-        ## Obstacle
-        if isinstance(obstacles, list):
-            self.obstacles = obstacles
-        else:
-            self.obstacles = [obstacles]
-        # Discretize obstacles, i.e. create boolean arrays
         topo = self.variables[0].topology
-        self.cond = None
-        for obstacle in self.obstacles:
-            cond = obstacle.discretize(topo)
-            if self.cond is None:
-                self.cond = cond
+        # Indices of the grid points on which penalization is applied.
+        # It may be a single condition (one penal coeff for all subsets)
+        # or a list of conditions (one different coeff for each subset).
+        self._cond = None
+        if isinstance(obstacles, list):
+            msg = 'A penalization factor is required for the obstacles.'
+            assert coeff is not None, msg
+            self._coeff = coeff
+            self._cond = self._init_single_coeff(obstacles, topo)
+            self._apply = self._apply_single_coeff
+
+        elif isinstance(obstacles, dict):
+            # _cond is a list of index sets and _coeff the matching
+            # list of penalization coefficients.
+            self._cond, self._coeff = self._init_multi_coeff(obstacles, topo)
+            self._apply = self._apply_multi_coeff
+
+        for v in self.variables:
+            msg = 'Multiresolution not implemented for penalization.'
+            assert v.topology == topo, msg
+
+        # list of numpy arrays to penalize
+        self._varlist = []
+        for v in self.variables:
+            for d in xrange(v.nbComponents):
+                self._varlist.append(v[d])
+
+    def _init_single_coeff(self, obstacles, topo):
+        """
+        Compute a condition which represents the union
+        of all obstacles.
+        """
+        for obs in obstacles:
+            assert not obs.is_porous
+        assert isinstance(obstacles, list)
+        return Subset.union(obstacles, topo)
+
+    def _init_multi_coeff(self, obstacles, topo):
+        """
+        Compute one condition per obstacle (or per porous layer),
+        together with the matching penalization coefficient.
+        """
+        cond = []
+        coeff = []
+        for obs in obstacles:
+            if obs.is_porous:
+                assert isinstance(obstacles[obs], list)
+                current = obs.ind[topo]
+                nb_layers = len(current)
+                assert len(obstacles[obs]) == nb_layers
+                for i in xrange(nb_layers):
+                    # append the list of indices
+                    cond.append(current[i])
+                    # and its corresponding coeff
+                    coeff.append(obstacles[obs][i])
             else:
-                lsize = min(len(self.cond), len(cond))
-                for c in xrange(lsize):
-                    np.logical_or(cond[c], self.cond[c], self.cond[c])
-                for c in xrange(lsize + 1, len(cond)):
-                    self.cond[c] = cond[c]
-        assert self.factor.size == len(self.cond)
-        self.input = self.output = self.variables
+                cond.append(obs.ind[topo])
+                coeff.append(obstacles[obs])
+        return cond, coeff
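+
+    # Example layout (hypothetical names and values): with a porous
+    # obstacle holding two layers and a plain one,
+    #   obstacles = {porous_obs: [1e4, 1e2], plain_obs: 1e8}
+    # gives cond = [layer_0_ind, layer_1_ind, plain_ind] and
+    # coeff = [1e4, 1e2, 1e8].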
 
     @debug
-    @timed_function
-    def apply(self, simulation=None):
-        if simulation is None:
-            raise ValueError("Missing dt value for penalization computation.")
+    @profile
+    def _apply_single_coeff(self, dt):
+        coef = 1.0 / (1.0 + dt * self._coeff)
+        for v in self._varlist:
+            v[self._cond] *= coef
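+
+    # Note: the 1 / (1 + dt * coeff) factor is an implicit Euler step of
+    # the penalization term dv/dt = -coeff * v inside the obstacle:
+    #   (v_new - v_old) / dt = -coeff * v_new
+    #   =>  v_new = v_old / (1 + dt * coeff)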
 
-        # Calling for requirements completion
-        DiscreteOperator.apply(self, simulation)
+    def _apply_multi_coeff(self, dt):
+        for i in xrange(len(self._cond)):
+            coef = 1.0 / (1.0 + dt * self._coeff[i])
+            cond = self._cond[i]
+            for v in self._varlist:
+                v[cond] *= coef
 
+    def apply(self, simulation=None):
+        assert simulation is not None, \
+            "Simulation parameter is required."
         dt = simulation.timeStep
-        coef = 1.0 / (1.0 + dt * self.factor)
-
-        for v in self.variables:
-            i = 0
-            for cond in self.cond:
-                for dim in xrange(v.nbComponents):
-                    v[dim][cond] *= coef[i]
-                i += 1
-
+        self._apply(dt)
diff --git a/HySoP/hysop/operator/discrete/poisson_fft.py b/HySoP/hysop/operator/discrete/poisson_fft.py
index 28911c2cae934f93fd2398e700da73b89a134ccc..f9ba044f3867bf099b5bb8ecb088f5fe87cb1b6a 100644
--- a/HySoP/hysop/operator/discrete/poisson_fft.py
+++ b/HySoP/hysop/operator/discrete/poisson_fft.py
@@ -10,8 +10,9 @@ except ImportError:
     from parmepy.fakef2py import fftw2py
 
 from parmepy.operator.discrete.discrete import DiscreteOperator
-from parmepy.constants import debug, prof
-from parmepy.mpi import MPI
+from parmepy.operator.discrete.reprojection import Reprojection
+from parmepy.constants import debug
+from parmepy.tools.profiler import profile
 
 
 class PoissonFFT(DiscreteOperator):
@@ -22,20 +23,26 @@ class PoissonFFT(DiscreteOperator):
 
     @debug
     def __init__(self, velocity, vorticity, projection=None,
-                 multires=False, filterSize=None, correction=None):
+                 filterSize=None, correction=None, **kwds):
         """
         Constructor.
-        @param[out] velocity : discretisation of the solution field
-        @param[in] vorticity : discretisation of the RHS (mind the minus rhs!)
-        @param projection :
-        @param[in] multires : true if velo/vorticity do not have the same
-        resolution
+        @param[out] velocity : discretization of the solution field
+        @param[in] vorticity : discretization of the RHS (mind the minus rhs!)
+        @param projection : if None, no projection. Else:
+        - either the reprojection frequency, never updated,
+        - or a Reprojection discrete operator. In that case a criterion
+        depending on the vorticity is computed at each time step and,
+        if criterion > threshold, the projection frequency is updated.
         @param filterSize : filter size array, = domainLength/(CoarseRes-1)
         @param correction : operator used to shift velocity according
         to a given input (fixed) flowrate.
         See parmepy.operator.velocity_correction.
         Default = None.
         """
+        # Base class initialisation
+        assert 'variables' not in kwds, 'variables parameter is set internally.'
+        super(PoissonFFT, self).__init__(variables=[velocity, vorticity],
+                                         **kwds)
         ## Solution field
         self.velocity = velocity
         ## RHS field
@@ -44,54 +51,71 @@ class PoissonFFT(DiscreteOperator):
         self.projection = projection
         ## Filter size array = domainLength/(CoarseRes-1)
         self.filterSize = filterSize
-        # Base class initialisation
-        DiscreteOperator.__init__(self, [velocity, vorticity])
-
         # If 2D problem, vorticity must be a scalar
         self.dim = self.velocity.domain.dimension
         if self.dim == 2:
             assert self.vorticity.nbComponents == 1
-
-        if self.dim != 2 and self.dim != 3:
-            raise AttributeError("Wrong problem dimension: only 2D \
-                                        and 3D cases are implemented.")
-
+        self.correction = correction
         self.input = [self.vorticity]
         self.output = [self.velocity]
+
+        ## The function called during apply
+        self.solve = None
+        # a sub function ...
+        self._solve = None
+        self.do_projection = None
+        self._select_solve()
+
+    def _select_solve(self):
         ## Multiresolution ?
-        self.multires = multires
+        multires = self.velocity.topology.mesh != self.vorticity.topology.mesh
 
         # Connection to the required apply function
-        # (to avoid 'if' calls during apply)
         if self.dim == 2:
             self._solve = self._solve2D
         elif self.dim == 3:
             # If there is a projection, vorticity is also an output
             if self.projection is not None:
                 self.output.append(self.vorticity)
-                if self.multires:
+                if multires:
                     self._solve = self._solve3D_proj_multires
                 else:
                     self._solve = self._solve3D_proj
+
+                if isinstance(self.projection, Reprojection):
+                    self.do_projection = self.do_projection_with_op
+                else:
+                    self.do_projection = self.do_projection_no_op
+
             else:
-                if self.multires:
+                if multires:
                     self._solve = self._solve3D_multires
                 else:
                     self._solve = self._solve3D
+        else:
+            raise AttributeError('Only 2D and 3D problems are implemented.')
 
         # Operator to shift velocity according to an input required flowrate
-        if correction is not None:
-            self.correctionOp = correction
+        if self.correction is not None:
             self.solve = self._solve_and_correct
         else:
             self.solve = self._solve
 
+    def do_projection_with_op(self, simu):
+        self.projection.apply(simu)
+        ite = simu.currentIteration
+        return self.projection.do_projection(ite)
+
+    def do_projection_no_op(self, simu):
+        ite = simu.currentIteration
+        return ite % self.projection == 0
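+
+    # Sketch of the two projection modes (hypothetical argument values):
+    #   PoissonFFT(v, w, projection=10)        # project every 10 iterations
+    #   PoissonFFT(v, w, projection=reproj_op) # Reprojection operator decides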
+
     def _solve2D(self, simu=None):
         """
         Solve 2D poisson problem
         """
         self.velocity.data[0], self.velocity.data[1] =\
-            fftw2py.solve_poisson_2d(self.vorticity.data,
+            fftw2py.solve_poisson_2d(self.vorticity.data[0],
                                      self.velocity.data[0],
                                      self.velocity.data[1])
 
@@ -99,7 +123,7 @@ class PoissonFFT(DiscreteOperator):
         """
         apply projection onto vorticity
         """
-        ghosts_w = self.vorticity.topology.ghosts
+        ghosts_w = self.vorticity.topology.ghosts()
         self.vorticity.data[0], self.vorticity.data[1], \
             self.vorticity.data[2] = \
                fftw2py.projection_om_3d(self.vorticity.data[0],
@@ -120,8 +144,8 @@ class PoissonFFT(DiscreteOperator):
                                   self.vorticity.data[2])
 
         # Solves Poisson equation using filter vorticity
-        ghosts_v = self.velocity.topology.ghosts
-        ghosts_w = self.vorticity.topology.ghosts
+        ghosts_v = self.velocity.topology.ghosts()
+        ghosts_w = self.vorticity.topology.ghosts()
         self.velocity.data[0], self.velocity.data[1], self.velocity.data[2] = \
             fftw2py.solve_poisson_3d(vortFilter[0], vortFilter[1],
                                      vortFilter[2], self.velocity.data[0],
@@ -132,8 +156,7 @@ class PoissonFFT(DiscreteOperator):
         """
         3D, multiresolution, with projection
         """
-        ite = simu.currentIteration
-        if self.projection.doProjection(ite):
+        if self.do_projection(simu):
             self._project()
         self._solve3D_multires()
 
@@ -141,8 +164,7 @@ class PoissonFFT(DiscreteOperator):
         """
         3D, with projection
         """
-        ite = simu.currentIteration
-        if self.projection.doProjection(ite):
+        if self.do_projection(simu):
             self._project()
         self._solve3D()
 
@@ -151,8 +173,8 @@ class PoissonFFT(DiscreteOperator):
         Basic solve
         """
         # Solves Poisson equation using usual vorticity
-        ghosts_v = self.velocity.topology.ghosts
-        ghosts_w = self.vorticity.topology.ghosts
+        ghosts_v = self.velocity.topology.ghosts()
+        ghosts_w = self.vorticity.topology.ghosts()
         self.velocity.data[0], self.velocity.data[1], self.velocity.data[2] =\
             fftw2py.solve_poisson_3d(self.vorticity.data[0],
                                      self.vorticity.data[1],
@@ -163,16 +185,12 @@ class PoissonFFT(DiscreteOperator):
 
     def _solve_and_correct(self, simu):
         self._solve(simu)
-        self.correctionOp.apply(simu)
+        self.correction.apply(simu)
 
     @debug
-    @prof
+    @profile
     def apply(self, simulation=None):
-        # Calling for requirements completion
-        DiscreteOperator.apply(self, simulation)
-        ctime = MPI.Wtime()
         self.solve(simulation)
-        self._apply_timer.append_time(MPI.Wtime() - ctime)
 
     def finalize(self):
         """
diff --git a/HySoP/hysop/operator/discrete/reprojection.py b/HySoP/hysop/operator/discrete/reprojection.py
new file mode 100644
index 0000000000000000000000000000000000000000..514e3bf773369b1df479865fd702f42cf2f69bfe
--- /dev/null
+++ b/HySoP/hysop/operator/discrete/reprojection.py
@@ -0,0 +1,127 @@
+# -*- coding: utf-8 -*-
+"""
+@file reprojection.py
+Compute reprojection criterion and divergence maximum
+"""
+import numpy as np
+from parmepy.constants import debug, PARMES_MPI_REAL
+from parmepy.methods_keys import SpaceDiscretisation
+from parmepy.operator.discrete.discrete import DiscreteOperator
+from parmepy.numerics.finite_differences import FD_C_4
+from parmepy.numerics.differential_operations import GradV
+import parmepy.tools.numpywrappers as npw
+from parmepy.numerics.update_ghosts import UpdateGhosts
+from parmepy.mpi import MPI
+from parmepy.tools.timers import timed_function
+
+
+class Reprojection(DiscreteOperator):
+    """
+    Update the reprojection frequency, according to the current
+    value of the vorticity field.
+    """
+    def __init__(self, vorticity, threshold, frequency, **kwds):
+        """
+        Constructor.
+        @param vorticity: discretization of the vorticity field
+        @param threshold : the reprojection frequency is reset to 1 when
+        the criterion exceeds this threshold
+        @param frequency : default frequency of execution of the reprojection
+        """
+        if 'method' in kwds and kwds['method'] is None:
+            kwds['method'] = {SpaceDiscretisation: FD_C_4}
+
+        ## vorticity field
+        self.vorticity = vorticity
+        assert 'variables' not in kwds, 'variables parameter is set internally.'
+        super(Reprojection, self).__init__(variables=[vorticity], **kwds)
+        ## Frequency for reprojection
+        self.frequency = frequency
+        ## The initial value is used as the default frequency
+        ## during the simulation
+        self._default_frequency = frequency
+        # Threshold for the reprojection criterion: if the criterion
+        # exceeds this constant, a reprojection is needed.
+        self.threshold = threshold
+        # local counter
+        self._counter = 0
+        ## Numerical methods for space discretization
+        assert SpaceDiscretisation in self.method
+        self.method = self.method[SpaceDiscretisation]
+        self.input = [vorticity]
+        self.output = []
+        topo = self.vorticity.topology
+        # prepare ghost points synchro for vorticity
+        self._synchronize = UpdateGhosts(topo, self.vorticity.nbComponents)
+        # grad function
+        self._function = GradV(topo, method=self.method)
+
+    def _set_work_arrays(self, rwork=None, iwork=None):
+
+        memshape = self.vorticity.data[0].shape
+        worklength = self.vorticity.nbComponents ** 2
+
+        # setup for rwork, iwork is useless.
+        if rwork is None:
+            # ---  Local allocation ---
+            self._rwork = []
+            for i in xrange(worklength):
+                self._rwork.append(npw.zeros(memshape))
+        else:
+            assert isinstance(rwork, list), 'rwork must be a list.'
+            # --- External rwork ---
+            self._rwork = rwork
+            msg = 'Bad shape/length external work. Use get_work_properties'
+            msg += ' function to find the right properties for work arrays.'
+            assert len(self._rwork) == worklength, msg
+            for wk in self._rwork:
+                assert wk.shape == memshape
+
+    @debug
+    @timed_function
+    def apply(self, simulation=None):
+        ite = simulation.currentIteration
+
+        # Reset reprojection frequency to default
+        self.frequency = self._default_frequency
+
+        # Synchronize ghost points of vorticity
+        self._synchronize(self.vorticity.data)
+        # gradU computation
+        self._rwork = self._function(self.vorticity.data, self._rwork)
+        nbComponents = self.vorticity.nbComponents
+        # maximum of the absolute vorticity divergence
+        d1 = np.max(abs(sum([(self._rwork[(nbComponents + 1) * i])
+                             for i in xrange(nbComponents)])))
+        # maximum of the absolute partial derivatives of vorticity
+        d2 = 0.0
+        for grad_n in self._rwork:
+            d2 = max(d2, np.max(abs(grad_n)))
+
+        # computation of the reprojection criterion and mpi-reduction
+        criterion = d1 / d2
+        criterion = self.vorticity.topology.comm.allreduce(criterion,
+                                                           op=MPI.MAX)
+        # is reprojection of vorticity needed for the next time step ?
+        if criterion > self.threshold:
+            self.frequency = 1
+
+        # update counter
+        if self.do_projection(ite):
+            self._counter += 1
+
+        # Print results, if required
+        # Remark: the writer buffer is connected (by pointer) to diagnostics
+        if self._writer is not None and self._writer.do_write(ite):
+            self._writer.buffer[0, 0] = simulation.time
+            self._writer.buffer[0, 1] = d1
+            self._writer.buffer[0, 2] = d2
+            self._writer.buffer[0, 3] = self._counter
+            self._writer.write()
+
+    def do_projection(self, ite):
+        """
+        True if projection must be done
+        """
+        return ite % self.frequency == 0
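+
+    # Summary of the decision logic (f = default frequency, s = threshold):
+    #   criterion = max|div(w)| / max|dw_i/dx_j|   (computed in apply)
+    #   frequency = 1 if criterion > s else f      (reset at each apply)
+    #   a projection is done whenever ite % frequency == 0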
diff --git a/HySoP/hysop/operator/discrete/stretching.py b/HySoP/hysop/operator/discrete/stretching.py
index 146c5418b98fc21b969ef6421df317e8e74e08f5..bb526848dd8389d0224f9601fe19fd185b6932d6 100755
--- a/HySoP/hysop/operator/discrete/stretching.py
+++ b/HySoP/hysop/operator/discrete/stretching.py
@@ -14,8 +14,9 @@ from parmepy.numerics.integrators.runge_kutta3 import RK3
 from parmepy.numerics.integrators.runge_kutta4 import RK4
 import parmepy.numerics.differential_operations as diff_op
 import parmepy.tools.numpywrappers as npw
-from parmepy.numerics.updateGhosts import UpdateGhosts
+from parmepy.numerics.update_ghosts import UpdateGhosts
 from parmepy.mpi import MPI
+from parmepy.tools.profiler import profile
 from abc import ABCMeta, abstractmethod
 import math
 ceil = math.ceil
@@ -32,79 +33,76 @@ class Stretching(DiscreteOperator):
     __metaclass__ = ABCMeta
 
     @debug
-    def __init__(self, velocity, vorticity, method=None):
+    def __init__(self, velocity, vorticity, formulation, **kwds):
         """
-        @param velocity : discrete field
-        @param vorticity : discrete field
-        @param method : numerical method for space/time discretizations
-        Default = {TimeIntegrator: RK3, SpaceDiscretisation: FD_C_4}
+        @param velocity : discretization of the velocity field
+        @param vorticity : discretization of the vorticity field
+        @param formulation : chosen formulation to compute the stretching:
+        diff_op.DivWV (Conservative) or diff_op.GradVxW (GradUW).
         """
         ## velocity discrete field
         self.velocity = velocity
         ## vorticity discrete field
         self.vorticity = vorticity
-        if method is None:
-            import parmepy.default_methods as default
-            method = default.STRETCHING
-        DiscreteOperator.__init__(self, [self.velocity, self.vorticity],
-                                  method=method)
+        ## Formulation for stretching (divWV or GradVxW)
+        self.formulation = formulation
 
-        self.input = [self.velocity, self.vorticity]
+        if 'method' not in kwds:
+            import parmepy.default_methods as default
+            kwds['method'] = default.STRETCHING
+        # Work vector used by time-integrator
+        self._ti_work = None
+        # Work vector used by numerical diff operator.
+        self._str_work = None
+        super(Stretching, self).__init__(variables=[self.velocity,
+                                                    self.vorticity], **kwds)
+
+        self.input = self.variables
         self.output = [self.vorticity]
         # \todo multiresolution case
-        assert self.velocity.topology == self.vorticity.topology,\
+        assert self.velocity.topology.mesh == self.vorticity.topology.mesh,\
             'Multiresolution case not yet implemented.'
 
         ## Number of components of the operator (result)
         self.nbComponents = 3  # Stretching only in 3D and for vector fields.
 
-        ## stability constant
-        self.cststretch = 0.
-        # Depends on time integration method
-        timeint = self.method[TimeIntegrator]
-        classtype = timeint.mro()[0]
-        if classtype is Euler:
-            self.cststretch = 2.0
-        elif classtype is RK2:
-            self.cststretch = 2.0
-        elif classtype is RK3:
-            self.cststretch = 2.5127
-        elif classtype is RK4:
-            self.cststretch = 2.7853
         # prepare ghost points synchro for velocity
         self._synchronize = UpdateGhosts(self.velocity.topology,
                                          self.velocity.nbComponents
                                          + self.vorticity.nbComponents)
-        ## Formulation used to compute stretching (default = DivWV)
-        self.formulation = diff_op.DivWV
-
-    def setUp(self):
 
-        memshape = self.velocity.data[0].shape
-        # work list length for time-integrator
-        self._work_length_ti = self.method[TimeIntegrator].getWorkLengths(3)
-        # work list length for DivWV operation.
-        self._work_length_str = self.formulation.getWorkLengths()
-        if not self.hasExternalWork:
-            self._worklength = self._work_length_str + self._work_length_ti
-            self._rwork = [npw.zeros(memshape)
-                           for i in xrange(self._worklength)]
-        else:
-            assert len(self._rwork) == self._work_length_ti + \
-                self._work_length_str
-            for wk in self._rwork:
-                assert wk.shape == memshape
-
-        self.time_int_work = self._rwork[:self._work_length_ti]
-        self.str_work = self._rwork[self._work_length_ti:]
         # A function to compute the gradient of a vector field
         # Work vector is provided in input.
         self.strFunc = \
             self.formulation(self.velocity.topology,
-                             self.str_work, method=
-                             self.method[SpaceDiscretisation])
+                             self._str_work,
+                             method=self.method[SpaceDiscretisation])
 
-    def updateGhosts(self):
+    def _set_work_arrays(self, rwork=None, iwork=None):
+
+        shape_v = self.velocity.data[0].shape
+        ti = self.method[TimeIntegrator]
+        # work list length for time-integrator
+        work_length_ti = ti.getWorkLengths(3)
+        rwork_length = work_length_ti + self.formulation.getWorkLengths()
+        # setup for rwork, iwork is useless.
+        if rwork is None:
+            # ---  Local allocation ---
+            self._rwork = []
+            for _ in xrange(rwork_length):
+                self._rwork.append(npw.zeros(shape_v))
+        else:
+            assert isinstance(rwork, list), 'rwork must be a list.'
+            # --- External rwork ---
+            self._rwork = rwork
+            assert len(self._rwork) == rwork_length
+            for wk in rwork:
+                assert wk.shape == shape_v
+        self._ti_work = self._rwork[:work_length_ti]
+        self._str_work = self._rwork[work_length_ti:]
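+        # _rwork layout: the first work_length_ti arrays feed the time
+        # integrator, the remaining ones feed the differential operation
+        # (DivWV or GradVxW).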
+
+    def update_ghosts(self):
         """
         Update ghost points values
         """
@@ -113,6 +111,7 @@ class Stretching(DiscreteOperator):
     @abstractmethod
     def apply(self, simulation=None):
         """
+        Abstract interface
         """
 
 
@@ -121,21 +120,8 @@ class Conservative(Stretching):
     Discretisation of the following problem :
     \f{eqnarray*} \frac{\partial\omega}{\partial t} = \nabla.(\omega:v) \f}
     """
-    def __init__(self, velocity, vorticity, method=None):
-        Stretching.__init__(self, velocity, vorticity, method)
-        self.formulation = diff_op.DivWV
-
-    @staticmethod
-    def getWorkLengths(method=None, domain_dim=3):
-        assert TimeIntegrator in method,\
-            'A time integrator is required for the stretching.'
-        # Stretching occurs only in 3D
-        rwork_length = method[TimeIntegrator].getWorkLengths(domain_dim)
-        rwork_length += diff_op.DivWV.getWorkLengths()
-        return rwork_length, 0
-
-    def setUp(self):
-        Stretching.setUp(self)
+    def __init__(self, **kwds):
+        super(Conservative, self).__init__(formulation=diff_op.DivWV, **kwds)
 
         # Time integration scheme.
         def rhs(t, y, result):
@@ -145,19 +131,16 @@ class Conservative(Stretching):
         # Create the time integrator
         self.timeIntegrator = \
             self.method[TimeIntegrator](self.nbComponents,
-                                        self.time_int_work,
+                                        self._ti_work,
                                         self.velocity.topology,
                                         f=rhs,
                                         optim=WITH_GUESS)
-        self._isUpToDate = True
 
+    @debug
+    @profile
     def apply(self, simulation=None):
-        if simulation is None:
-            raise ValueError("Missing simulation value for computation.")
-
-        # Calling for requirements completion
-        DiscreteOperator.apply(self, simulation)
-        ctime = MPI.Wtime()
+        assert simulation is not None, \
+            "Missing simulation value for computation."
 
         # time step
         dt = simulation.timeStep
@@ -168,45 +151,23 @@ class Conservative(Stretching):
         # - Call time integrator -
         # Init workspace with a first evaluation of the
         # rhs of the integrator
-        self.time_int_work[:self.nbComponents] = \
+        self._ti_work[:self.nbComponents] = \
             self.timeIntegrator.f(t, self.vorticity.data,
-                                  self.time_int_work[:self.nbComponents])
+                                  self._ti_work[:self.nbComponents])
         # perform integration and save result in-place
         self.vorticity.data = self.timeIntegrator(t, self.vorticity.data, dt,
                                                   result=self.vorticity.data)
-        self._apply_timer.append_time(MPI.Wtime() - ctime)
 
 
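Note the calling convention used above: with optim=WITH_GUESS the integrator expects its work buffer to already hold f(t, y) on entry, which is why apply() pre-fills _ti_work before invoking it. A schematic helper showing the same two-step sequence (all arguments are placeholders):

    def advance(integrator, rhs, t, y, dt, work, nb_components):
        # WITH_GUESS convention: the caller supplies the first rhs
        # evaluation through the integrator's work buffer ...
        work[:nb_components] = rhs(t, y, work[:nb_components])
        # ... so the scheme can reuse it as its first stage instead of
        # re-evaluating f(t, y), then integrates in-place.
        return integrator(t, y, dt, result=y)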
 class GradUW(Stretching):
-    """ Discretisation of the following problem:
+    """
+    Discretisation of the following problem:
     \f{eqnarray*} \frac{\partial\omega}{\partial t}=[\nabla(v)][\omega]\f}
     """
 
-    def __init__(self, velocity, vorticity, method=None):
-        Stretching.__init__(self, velocity, vorticity, method)
-        self.formulation = diff_op.GradVxW
+    def __init__(self, **kwds):
+        super(GradUW, self).__init__(formulation=diff_op.GradVxW, **kwds)
 
-    @staticmethod
-    def getWorkLengths(method=None, domain_dim=3):
-        """
-        Return the length of working arrays lists required
-        for stretching discrete operator, depending on :
-        - the formulation (Conservative or GradUW)
-        - the time integrator (RK3, ...)
-        @param method : the dict of parameters for the operator.
-        """
-        if method is None:
-            import parmepy.default_methods as default
-            method = default.STRETCHING
-        assert TimeIntegrator in method,\
-            'A time integrator is required for the stretching.'
-        # Stretching occurs only in 3D
-        rwork_length = method[TimeIntegrator].getWorkLengths(domain_dim)
-        rwork_length += diff_op.GradVxW.getWorkLengths()
-        return rwork_length, 0
-
-    def setUp(self):
-        Stretching.setUp(self)
         ## a vector to save diagnostics computed from GradVxW (max div ...)
         self.diagnostics = npw.ones(2)
 
@@ -219,16 +180,29 @@ class GradUW(Stretching):
         # Create the time integrator
         self.timeIntegrator = \
             self.method[TimeIntegrator](self.nbComponents,
-                                        self.time_int_work,
+                                        self._ti_work,
                                         self.velocity.topology,
                                         f=rhs,
                                         optim=WITH_GUESS)
-        self._isUpToDate = True
+        ## stability constant
+        self.cststretch = 0.
+        # Depends on the time integration scheme
+        # (method[TimeIntegrator] is the integrator class itself).
+        timeint = self.method[TimeIntegrator]
+        if timeint is Euler or timeint is RK2:
+            self.cststretch = 2.0
+        elif timeint is RK3:
+            self.cststretch = 2.5127
+        elif timeint is RK4:
+            self.cststretch = 2.7853
 
+    @debug
+    @profile
     def apply(self, simulation=None):
-        # Calling for requirements completion
-        DiscreteOperator.apply(self, simulation)
-        ctime = MPI.Wtime()
+        assert simulation is not None, \
+            "Missing simulation value for computation."
 
         # time step
         dt = simulation.timeStep
@@ -245,15 +219,14 @@ class GradUW(Stretching):
         # Init workspace with a first evaluation of the
         # rhs of the integrator
         for i in xrange(ndt):
-            self.time_int_work[:self.nbComponents] = \
+            self._ti_work[:self.nbComponents] = \
                 self.timeIntegrator.f(t, self.vorticity.data,
-                                      self.time_int_work[:self.nbComponents])
+                                      self._ti_work[:self.nbComponents])
 
             # perform integration and save result in-place
             self.vorticity.data = \
                 self.timeIntegrator(t, self.vorticity.data, subdt[i],
                                     result=self.vorticity.data)
-        self._apply_timer.append_time(MPI.Wtime() - ctime)
 
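The cststretch constants above bound the largest stable step by cststretch / max|grad(v)|, and apply() consumes checkStability's output as (ndt, subdt). Since the hunk is truncated below, the following is only a sketch of that kind of sub-stepping, assuming the maximum of |grad(v)| sits in self.diagnostics[1] (slot index and zero-guard are assumptions):

    import math
    import parmepy.tools.numpywrappers as npw

    def checkStability(self, dt):
        # Largest stable sub-step for the chosen explicit scheme.
        max_gradv = max(self.diagnostics[1], 1e-12)  # assumed diagnostic slot
        dt_stab = min(dt, self.cststretch / max_gradv)
        # Split dt into equal sub-steps no larger than dt_stab.
        nb_cycles = int(math.ceil(dt / dt_stab))
        subdt = npw.zeros(nb_cycles)
        subdt[:] = dt / nb_cycles
        return nb_cycles, subdt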
     def checkStability(self, dt):
         """
diff --git a/HySoP/hysop/operator/discrete/velocity_correction.py b/HySoP/hysop/operator/discrete/velocity_correction.py
index 98e138921979719ddf0c8f85808361136d02992b..33a48c571dae1481365ea2d506a9656d1aa065ca 100755
--- a/HySoP/hysop/operator/discrete/velocity_correction.py
+++ b/HySoP/hysop/operator/discrete/velocity_correction.py
@@ -8,11 +8,9 @@ Correction of the velocity field.
 from parmepy.constants import debug
 from parmepy.operator.discrete.discrete import DiscreteOperator
 from parmepy.fields.variable_parameter import VariableParameter
-from parmepy.domain.obstacle.controlBox import ControlBox
-from parmepy.tools.timers import timed_function
+from parmepy.tools.profiler import profile
 import parmepy.tools.numpywrappers as npw
-from parmepy.constants import np, XDIR, YDIR, ZDIR
-from parmepy.mpi import MPI
+from parmepy.constants import XDIR, YDIR, ZDIR
 
 
 class VelocityCorrection_D(DiscreteOperator):
@@ -24,16 +22,19 @@ class VelocityCorrection_D(DiscreteOperator):
     """
 
     @debug
-    def __init__(self, velocity, vorticity, req_flowrate, cb):
+    def __init__(self, velocity, vorticity, req_flowrate, cb, **kwds):
         """
         @param[in, out] velocity field to be corrected
         @param[in] vorticity field used to compute correction
-        @param[in] req_flowrate : required value for the 
+        @param[in] req_flowrate : required value for the
         flowrate (VariableParameter object)
-        @param[in] surf : surface (parmepy.domain.obstacle.planes.SubPlane)
-        used to compute reference flow rates. Default = surface at x_origin,
-        normal to x-dir.
+        @param[in] cb : control box (a ControlBox object) whose input
+        surface (at x_origin, normal to x-dir) is used to compute
+        reference flow rates.
         """
+        assert 'variables' not in kwds, 'variables parameter is useless.'
+        super(VelocityCorrection_D, self).__init__(
+            variables=[velocity, vorticity], **kwds)
         ## velocity discrete field
         self.velocity = velocity
         ## vorticity discrete field
@@ -43,23 +44,24 @@ class VelocityCorrection_D(DiscreteOperator):
         # If 2D problem, vorticity must be a scalar
         if self.dim == 2:
             assert self.vorticity.nbComponents == 1
-        assert (self.dim == 2 or self.dim == 3),\
+        assert self.dim in (2, 3),\
             "Wrong problem dimension: only 2D and 3D cases are implemented."
 
-        DiscreteOperator.__init__(self, [self.velocity, self.vorticity])
-
-        self.input = [self.velocity, self.vorticity]
+        self.input = self.variables
         self.output = [self.velocity]
-        ## Velocity from topology will be used as reference
+        ## A reference topology
         self.topo = self.velocity.topology
-        hh = self.topo.mesh.space_step
+        ## Volume of control
         self.cb = cb
         self.cb.discretize(self.topo)
-        self.surfRef = cb.lowerS[XDIR]
-        normal = self.surfRef.normal
-        dirs = np.where(normal == 0)[0]
-        self.ds = np.prod(self.surfRef.lengths[dirs] + hh[dirs])
-
+        # A reference surface, i.e. input surface for flow in x direction
+        self._in_surf = cb.surf[XDIR]
+
+        sdirs = self._in_surf.t_dir
+        # Compute 1./ds and 1./dv ...
+        cb_length = self.cb.real_length[self.topo]
+        self._inv_ds = 1. / npw.prod(cb_length[sdirs])
+        self._inv_dvol = 1. / npw.prod(cb_length)
-        ## Expected value for the flow rate through self.surfRef
+        ## Expected value for the flow rate through self._in_surf
         self.req_flowrate = req_flowrate
         assert isinstance(self.req_flowrate, VariableParameter),\
@@ -73,20 +75,13 @@ class VelocityCorrection_D(DiscreteOperator):
         self.rates = npw.zeros(nbf)
         self.req_flowrate_val = None
 
-        # Open file for output
-        import parmepy.tools.io_utils as io
-        io_default={"filename":'om_bar'}
-        self.fname = io.Writer(params=io_default).filename
-        if (self.topo.rank == 0):
-            self.f = open(self.fname, 'w')
-
-    def setUp(self):
         spaceStep = self.topo.mesh.space_step
         lengths = self.topo.domain.length
         self.coeff_mean = npw.prod(spaceStep) / npw.prod(lengths)
-        x0 = self.surfRef.origin[XDIR]
+        x0 = self._in_surf.real_orig[self.topo][XDIR]
+        # Compute X - X0, x0 being the coordinate of the 'entry'
+        # surface for the flow.
         self.x_coord = self.topo.mesh.coords[XDIR] - x0
-        self._isUpToDate = True
 
     def computeCorrection(self):
         """
@@ -95,17 +90,16 @@ class VelocityCorrection_D(DiscreteOperator):
         """
         ## Computation of the flowrates evaluated from
         ## current (ie non corrected) velocity
-        ## local flow reduced only on proc 0
         nbf = self.velocity.nbComponents + self.vorticity.nbComponents
         localrates = npw.zeros((nbf))
         for i in xrange(self.velocity.nbComponents):
-            localrates[i] = self.velocity.integrateOnSurf_proc(self.surfRef,
-                                                               component=i)
+            localrates[i] = self._in_surf.integrate_dfield_on_proc(
+                self.velocity, component=i)
         start = self.velocity.nbComponents
         ## Integrate vorticity over the whole domain
         for i in xrange(self.vorticity.nbComponents):
-            localrates[start + i] = \
-                self.vorticity.integrate_on_proc(self.cb, component=i)
+            localrates[start + i] = self.cb.integrate_dfield_on_proc(
+                self.vorticity, component=i)
 
         # MPI reduction for rates
         # rates = [flowrate[X], flowrate[Y], flowrate[Z],
@@ -114,30 +108,26 @@ class VelocityCorrection_D(DiscreteOperator):
         self.rates[...] = 0.0
         self.velocity.topology.comm.Allreduce(localrates, self.rates)
 
-        dlengths = npw.prod(self.cb.lengths)
-        for i in xrange(self.vorticity.nbComponents):
-            self.rates[start + i] /= dlengths
-
+        self.rates[:start] *= self._inv_ds
+        self.rates[start:] *= self._inv_dvol
+
         # Set velocity_shift == [Vx_shift, vort_mean[Y], vort_mean[Z]]
         # or (in 2D) velocity_shift == [Vx_shift, vort_mean]
         # Velocity shift for main dir component
         self.velocity_shift[XDIR] = self.req_flowrate_val[XDIR]\
-            - self.rates[XDIR] / self.ds
+            - self.rates[XDIR]
         # Shifts in other directions depend on x coord
         # and will be computed during apply.
 
-    @timed_function
+    @debug
+    @profile
     def apply(self, simulation=None):
-        # Calling for requirements completion
-        DiscreteOperator.apply(self, simulation)
-        ctime = MPI.Wtime()
-
         # the required flowrate value is updated (depending on time)
         self.req_flowrate.update(simulation)
 
-        # warning : the flow rate value is divided with area of input surf.
+        # warning : the flow rate value is divided by the input surface area.
         self.req_flowrate_val = self.req_flowrate[self.req_flowrate.name] \
-            / self.ds
+            * self._inv_ds
         # Computation of the required velocity shift
         # for the current state
         self.computeCorrection()
@@ -150,28 +140,21 @@ class VelocityCorrection_D(DiscreteOperator):
         #                         vort_mean[X], vort_mean[Y], vort_mean[Z]]
         # or (in 2D) [vx_shift, flowrates[Y], vort_mean]
         vort_mean = self.rates[start:]
-        t = simulation.time
         ite = simulation.currentIteration
-        if self.topo.rank == 0:
-            self.f = open(self.fname, 'a')
-            self.f.write("%s      %s      %s      %s      %s \n" % (t, 
-                                                                    ite, 
-                                                                    vort_mean[0], 
-                                                                    vort_mean[1], 
-                                                                    vort_mean[2]))
-            self.f.close()
+        if self._writer is not None and self._writer.do_write(ite):
+            self._writer.buffer[0, 0] = simulation.time
+            self._writer.buffer[0, 1] = ite
+            self._writer.buffer[0, 2:] = vort_mean[...]
+            self._writer.write()
 
         if self.dim == 2:
             # Correction of the Y-velocity component
             self.velocity[YDIR][...] += self.req_flowrate_val[YDIR] + \
-                vort_mean[XDIR] * self.x_coord - self.rates[YDIR] / self.ds
+                vort_mean[XDIR] * self.x_coord - self.rates[YDIR]
 
         elif self.dim == 3:
             # Correction of the Y and Z-velocity components
-            self.velocity[YDIR][...] +=  self.req_flowrate_val[YDIR] + \
-                vort_mean[ZDIR] * self.x_coord - self.rates[YDIR] / self.ds
+            self.velocity[YDIR][...] += self.req_flowrate_val[YDIR] + \
+                vort_mean[ZDIR] * self.x_coord - self.rates[YDIR]
             self.velocity[ZDIR][...] += self.req_flowrate_val[ZDIR] - \
-                vort_mean[YDIR] * self.x_coord - self.rates[ZDIR] / self.ds
-
-        self._apply_timer.append_time(MPI.Wtime() - ctime)
-
+                vort_mean[YDIR] * self.x_coord - self.rates[ZDIR]
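To make the corrected update concrete: after the Allreduce, rates[:start] already carries the 1/ds factor and rates[start:] the 1/dvol factor, so each transverse velocity component receives a constant shift plus a term linear in (x - x0). A small numeric sketch of the 3D Y-correction with made-up values:

    import numpy as np

    # Illustrative values only.
    req_flowrate_val_y = 0.0            # target flowrate / ds in Y
    rate_y = 0.05                       # measured flowrate / ds in Y
    vort_mean_z = 0.2                   # mean Z-vorticity over the box
    x_coord = np.linspace(0., 1., 5)    # x - x0 along the local mesh line

    # Same expression as in apply() above; the result is
    # broadcast-added to velocity[YDIR].
    vy_shift = req_flowrate_val_y + vort_mean_z * x_coord - rate_y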
diff --git a/HySoP/hysop/operator/energy_enstrophy.py b/HySoP/hysop/operator/energy_enstrophy.py
index 7a51b20eae182e699baa8de12b400522a034f367..f193018f3bf66c819e68112ea0977b38aaa99c85 100644
--- a/HySoP/hysop/operator/energy_enstrophy.py
+++ b/HySoP/hysop/operator/energy_enstrophy.py
@@ -3,14 +3,12 @@
 @file energy_enstrophy.py
 Compute Energy and Enstrophy
 """
-import numpy as np
-from parmepy.constants import debug, XDIR
-from parmepy.operator.monitors.monitoring import Monitoring
-from parmepy.tools.timers import timed_function
-import parmepy.tools.numpywrappers as npw
+from parmepy.operator.discrete.energy_enstrophy import EnergyEnstrophy as DEE
+from parmepy.operator.computational import Computational
+from parmepy.operator.continuous import opsetup
 
 
-class Energy_enstrophy(Monitoring):
+class EnergyEnstrophy(Computational):
     """
     Computes enstrophy and the kinetic energy
     \f{eqnarray*}
@@ -22,127 +20,68 @@ class Energy_enstrophy(Monitoring):
     \f}
     """
 
-    def __init__(self, velocity, vorticity,
-                 viscosity, isNormalized, **kwds):
+    def __init__(self, velocity, vorticity, is_normalized=True, **kwds):
         """
         Constructor.
         @param velocity field
         @param vorticity field
-        @param viscosity : kinematic viscosity
-        @param isNormalized : boolean indicating whether the enstrophy
-        and energy values have to be normalized by the domain lengths.
+        @param is_normalized : boolean indicating whether the enstrophy
+        and energy values have to be normalized by the domain lengths.
-        @param topo : the topology on which we want to monitor the fields
-        @param io_params : parameters (dict) to set file output.
-        If  None, no output. Set io_params = {} if you want output,
-        with default parameters values. 
+
         Default file name = 'energy_enstrophy.dat'
         See parmepy.tools.io_utils.Writer for details
         """
-        if 'io_params' in kwds:
-            params = kwds['io_params']
-            if not "filename" in params:
-                params["filename"] = "energy_enstrophy"
-            # Set output buffer shape
-            params["writebuffshape"] = (1, 3)
-        super(Energy_enstrophy, self).__init__(variables=[velocity, vorticity],
-                                               **kwds)
+        assert 'variables' not in kwds, 'variables parameter is useless.'
+        super(EnergyEnstrophy, self).__init__(variables=[velocity, vorticity],
+                                              **kwds)
         ## velocity field
         self.velocity = velocity
         ## vorticity field
         self.vorticity = vorticity
-        ## viscosity (scalar)
-        self.viscosity = viscosity
-        ## are the energy end enstrophy values normalized by domain lengths ?
+        ## are the energy and enstrophy values normalized by domain lengths?
-        self.isNormalized = isNormalized
+        self.is_normalized = is_normalized
         ## self._buffer_1 = 0.
         ## self._buffer_2 = 0.
         self.input = [velocity, vorticity]
         self.output = []
-        # \todo : rewrite for multiresolution case.
-        # Note FP : for multiresolution case, it would probably be
-        # better to use two different operators for energy and enstrophy.
 
-    def setUp(self):
-        if not self._isUpToDate:
-            self.discreteFields[self.velocity] = \
-                self.velocity.discretization(self.topology)
-            self.discreteFields[self.vorticity] =\
-                self.vorticity.discretization(self.topology)
+    def get_work_properties(self):
+        if not self._is_discretized:
+            msg = 'The operator must be discretized '
+            msg += 'before any call to this function.'
+            raise RuntimeError(msg)
+        vd = self.discreteFields[self.velocity]
+        wd = self.discreteFields[self.vorticity]
+        v_ind = vd.topology.mesh.iCompute
+        w_ind = wd.topology.mesh.iCompute
+        shape_v = vd[0][v_ind].shape
+        shape_w = wd[0][w_ind].shape
+        if shape_v == shape_w:
+            return {'rwork': [shape_v], 'iwork': None}
+        else:
+            return {'rwork': [shape_v, shape_w], 'iwork': None}
 
-            spaceStep = self.topology.mesh.space_step
-            length = self.topology.domain.length
-            if self.isNormalized:
-                self.coeffEnstrophy = (np.prod(spaceStep) /
-                                       np.prod(length))
-            else:
-                self.coeffEnstrophy = np.prod(spaceStep)
-            self.coeffEnergy = 0.5 * self.coeffEnstrophy
+    @opsetup
+    def setup(self, rwork=None, iwork=None):
+        if not self._is_uptodate:
 
-            # A work vector for local computations
-            # Warning : we assume one topo for all variables
-            self.ind = self.topology.mesh.iCompute
-            shape = self.discreteFields[self.velocity].data[0][self.ind].shape
-            self._work = npw.zeros(shape)
-            self._isUpToDate = True
+            self.discrete_op = DEE(self.discreteFields[self.velocity],
+                                   self.discreteFields[self.vorticity],
+                                   self.is_normalized,
+                                   rwork=rwork)
+            # Output setup
+            self._set_io('energy_enstrophy', (1, 3))
+            self.discrete_op.setWriter(self._writer)
+            self._is_uptodate = True
 
-    @debug
-    @timed_function
-    def apply(self, simulation=None):
+    def energy(self):
         """
-        Computation of kinetic energy, enstrophy &
-        Checking energy and enstrophy decay
+        Return last computed value of the energy
         """
-        if simulation is None:
-            raise ValueError("Missing simulation value for computation.")
-
-        #time = MPI.Wtime()
-
-        # --- Kinetic energy computation ---
-        vd = self.discreteFields[self.velocity]
-        # get the list of computation points (no ghosts)
-        nbc = vd.nbComponents
-        # Integrate (locally) velocity ** 2
-        self._work[...] = vd[XDIR][self.ind] ** 2
-        [np.add(self._work[...], vd[i][self.ind] ** 2, self._work[...])
-         for i in xrange(1, nbc)]
-        local_energy = np.sum(self._work)
-
-        # --- Enstrophy computation ---
-        vortd = self.discreteFields[self.vorticity]
-        nbc = vortd.nbComponents
-        # Integrate (locally) vorticity ** 2
-        self._work[...] = vortd[0][self.ind] ** 2
-        [np.add(self._work[...], vortd[i][self.ind] ** 2, self._work[...])
-         for i in xrange(1, nbc)]
-        local_enstrophy = np.sum(self._work)
+        return self.discrete_op.energy
 
-        # --- Reduce energy and enstrophy values overs all proc ---
-        # two ways : numpy or classical. Todo : check perf and comm
-        sendbuff = npw.zeros((2))
-        recvbuff = npw.zeros((2))
-        sendbuff[:] = [local_energy, local_enstrophy]
-        self.topology.comm.Allreduce(sendbuff, recvbuff)
-        # the other way :
-        #energy = self._topovel.topo.allreduce(local_energy, PARMES_MPI_REAL,
-        #                                     op=MPI.SUM)
-        #enstrophy = self._topovel.topo.allreduce(local_enstrophy,
-        #                                        PARMES_MPI_REAL,
-        #                                        op=MPI.SUM)
-
-        # Update global values
-        energy = recvbuff[0] * self.coeffEnergy
-        enstrophy = recvbuff[1] * self.coeffEnstrophy
-
-        # Update buffers
-        ## energyBuff1 = self._buffer_1
-        ## energyBuff2 = self._buffer_2
-        ## self._buffer_2 = self._buffer_1
-        ## self._buffer_1 = energy
-
-        # Print results, if required
-        ite = simulation.currentIteration
-        if self._writer is not None and self._writer.doWrite(ite):
-            self._writer.buffer[0, 0] = simulation.time
-            self._writer.buffer[0, 1] = energy
-            self._writer.buffer[0, 2] = enstrophy
-            self._writer.write()
+    def enstrophy(self):
+        """
+        Return last computed value of the enstrophy
+        """
+        return self.discrete_op.enstrophy
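A hedged usage sketch of the rewritten operator, following the discretize / get_work_properties / setup / apply life-cycle visible in this changeset; velo, vorti and simu are placeholders, and forwarding apply() to the discrete operator is assumed to be handled by the Computational base class:

    import parmepy.tools.numpywrappers as npw

    op = EnergyEnstrophy(velo, vorti, is_normalized=True)
    op.discretize()
    # Pre-allocate shared work arrays from the advertised shapes.
    props = op.get_work_properties()
    rwork = [npw.zeros(shape) for shape in props['rwork']]
    op.setup(rwork=rwork)
    op.apply(simu)
    print op.energy(), op.enstrophy()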
diff --git a/HySoP/hysop/operator/hdf_io.py b/HySoP/hysop/operator/hdf_io.py
new file mode 100644
index 0000000000000000000000000000000000000000..a328f0379b1eceb116cd30e61d0dc7a53f847943
--- /dev/null
+++ b/HySoP/hysop/operator/hdf_io.py
@@ -0,0 +1,337 @@
+"""
+@file hdf_io.py
+
+File output for field(s) value on a grid.
+"""
+from parmepy.constants import S_DIR, debug, HDF5, PARMES_REAL
+from parmepy.operator.computational import Computational
+import parmepy.tools.numpywrappers as npw
+import parmepy.tools.io_utils as io
+from parmepy.tools.parameters import IO_params
+from abc import ABCMeta, abstractmethod
+
+try:
+    import h5py
+except ImportError as h5py_error:
+    h5py = None
+    msg_err = 'Warning, h5py not found, you may not be able to'
+    msg_err += ' use hdf5 I/O functionalities.'
+    print msg_err
+
+from parmepy.tools.timers import timed_function
+
+
+class HDF_IO(Computational):
+    """
+    Abstract interface for read/write from/to hdf files, for
+    parmepy fields.
+    """
+
+    __metaclass__ = ABCMeta
+
+    def __init__(self, var_names=None, subset=None, **kwds):
+        """
+        Read/write some fields data from/into hdf/xdmf files.
+        Parallel io.
+        @param var_names : a dictionary of names connecting fields
+        to the datasets in the hdf file. See example below.
+        @param subset : a subset of the domain on which data are read.
+        It must be a parmepy.domain.obstacles.Obstacle.
+
+        Names parameter example:
+        if variables=[velo, vorti], and if hdf file contains
+        'vel_1_X, vel_1_Y, vel_1_Z, dat_2_X, dat_2_Y, dat_2_Z' keys, then
+        use :
+        names = {velo: 'vel', vorti:'dat'} if you want to read vel/dat
+        into velo/vorti.
+        Mind that when writing an hdf file, dataset names are set as :
+        field.name + topo.id + component direction.
+
+        If names=None, field names will be used as keys to search for
+        dataset.
+
+        """
+        super(HDF_IO, self).__init__(**kwds)
+
+        if h5py is None:
+            msg = 'You are trying to use the HDF5 reader/writer but '
+            msg += 'the h5py module was not found on your system.'
+            print msg
+            raise h5py_error
+
+        self.input = self.variables
+        self.output = self.variables
+
+        # If no filename is given, set it to
+        # the concatenation of variables'names.
+        if self.io_params is None:
+            name = ''
+            for var in self.input:
+                if var.name == 'unnamed':
+                    msg = 'Warning : you are trying to write an unnamed'
+                    msg += ' variable into an hdf file. This may result'
+                    msg += ' in unexpected side effects.'
+                    print msg
+                name += var.name
+
+                name += '_'
+            self.io_params = IO_params(name, fileformat=HDF5)
+        else:
+            assert self.io_params.fileformat is HDF5
+
+        ## Set a subset of the original domain
+        self.subset = subset
+
+        ## Dictionary of names to search for in the hdf file. May be None.
+        ## It will be checked during setup.
+        self.var_names = var_names
+
+        # Local topology, that MUST be common to all variables.
+        self._topology = None
+        self._slices = None
+        self._global_resolution = None
+        self._sl = None
+        # Dictionary of discrete fields. Key = name in hdf file,
+        # Value = discrete field
+        self.dataset = {}
+        # Get hdf file name. Depends on the read/write process. Must be
+        # defined in HDF_Reader or HDF_Writer init.
+        self._get_filename = lambda i=None: None
+        # File Object that holds hdf file
+        self._hdf_file = None
+
+    def discretize(self):
+        super(HDF_IO, self)._standard_discretize()
+        assert self._single_topo, 'Multi-resolution case is not allowed.'
+        self._topology = self.variables.values()[0]
+
+        # Discretize the subset, if required
+        if self.subset is not None:
+            self.subset.discretize(self._topology)
+            self._slices = self.subset.slices[self._topology]
+            # Global resolution for hdf5 output
+            self._global_resolution = \
+                self.subset.global_resolution(self._topology)
+            g_start = self.subset.gstart
+            # convert self._slices to global position in topo
+            sl = self._topology.mesh.toIndexGlobal(self._slices)
+            # And shift using global position of the surface
+            sl = [slice(sl[i].start - g_start[i], sl[i].stop - g_start[i])
+                  for i in xrange(self.domain.dimension)]
+
+        else:
+            self._global_resolution = \
+                list(self._topology.mesh.discretization.resolution - 1)
+            self._slices = self._topology.mesh.iCompute
+            g_start = self._topology.mesh.global_start
+            g_end = self._topology.mesh.global_end + 1
+            sl = [slice(g_start[i], g_end[i])
+                  for i in xrange(self.domain.dimension)]
+
+        # Reverse order, to fit with xdmf req.
+        self._global_resolution.reverse()
+        sl.reverse()
+        self._sl = tuple(sl)
+
+    def setup(self, rwork=None, iwork=None):
+        msg = 'discretize must be called before setup.'
+        assert self._is_discretized, msg
+
+        # No list of hdf dataset names provided by user ...
+        if self.var_names is None:
+            # Get field names and initialize dataset dict.
+            for df in self.discreteFields.values():
+                for d in xrange(df.nbComponents):
+                    name = df.name + S_DIR[d]
+                    self.dataset[name] = df.data[d]
+        else:
+            for var in self.var_names:
+                # Discrete field associated to var
+                var_d = var.discretization(self._topology)
+                for d in xrange(var_d.nbComponents):
+                    name = self.var_names[var] + S_DIR[d]
+                    self.dataset[name] = var_d.data[d]
+
+    def open_hdf(self, count, mode):
+        filename = self._get_filename(count)
+        print 'Opening hdf file : ', filename
+        if self._topology.size == 1:
+            self._hdf_file = h5py.File(filename, mode)
+            compression = 'gzip'
+        else:
+            self._hdf_file = h5py.File(filename, mode, driver='mpio',
+                                       comm=self._topology.comm)
+            compression = None
+
+        return compression
+
+    @abstractmethod
+    def apply(self, simulation=None):
+        """
+        Abstract interface to read/write process
+        """
+
+
+class HDF_Writer(HDF_IO):
+    """
+    Print field(s) values on a given topo, in HDF5 format.
+    """
+    def __init__(self, xmfalways=True, **kwds):
+        """
+        Write some fields data into hdf/xdmf files.
+        Parallel writes.
+        @param xmfalways : true if xmf output must be done every time
+        an hdf5 file is created
+        """
+        super(HDF_Writer, self).__init__(**kwds)
+
+        # count the number of calls
+        self._count = 0
+
+        self.step = self._step_HDF5
+        if xmfalways:
+            self.step = self._step_HDF5_XMF
+            self.finalize = lambda: 1
+        else:
+            self.finalize = self.createXMFFile
+        self._xdmf_data_files = []
+
+        # filename = prefix_N, N = counter value
+        self._get_filename = lambda i: self.io_params.filename + \
+            "_{0:05d}".format(i) + '.h5'
+
+    @debug
+    @timed_function
+    def apply(self, simulation=None):
+        if simulation is None:
+            raise ValueError("Missing simulation value for monitoring.")
+        ite = simulation.currentIteration
+        if ite % self.io_params.frequency == 0:
+            # Transfer from GPU to CPU if required
+            for v in self.variables:
+                df = self.discreteFields[v]
+                try:
+                    if not df.isBatch:
+                        # To host only if data fit in the device memory
+                        df.toHost()
+                        df.wait()
+                except AttributeError:
+                    pass
+            self.step(simulation)
+            self._count += 1
+
+    def createXMFFile(self):
+        """
+        Create and fill the xdmf file
+        """
+
+        if self._mpis.rank == self.io_params.io_leader:
+            print "CREATE XMF ", self.io_params.filename
+            f = open(self.io_params.filename + '.xmf', 'w')
+            f.write("<?xml version=\"1.0\" ?>\n")
+            f.write("<!DOCTYPE Xdmf SYSTEM \"Xdmf.dtd\">\n")
+            f.write("<Xdmf Version=\"2.0\">\n")
+            f.write(" <Domain>\n")
+            f.write("  <Grid Name=\"CellTime\" GridType=\"Collection\" ")
+            f.write("CollectionType=\"Temporal\">\n")
+            ds_names = self.dataset.keys()
+            for i, t in self._xdmf_data_files:
+                f.write(io.XMF.write_grid_attributes(
+                    self._topology, ds_names, i, t, self._get_filename(i),
+                    self.subset))
+            f.write("  </Grid>\n")
+            f.write(" </Domain>\n")
+            f.write("</Xdmf>\n")
+            f.close()
+
+    def _step_HDF5(self, simu):
+        """
+        Write a h5 file with data on each mpi process.
+        """
+        # Remarks:
+        # - force np.float64, since ParaView seems unable to read float32
+        # - writing compressed hdf5 files (gzip compression seems the best)
+        # - gzip compression does not work in parallel.
+
+        # Get the 'current' filename. It depends on the
+        # current output counter (count).
+        compression = self.open_hdf(self._count, mode='w')
+        # Get the names of output variables and create the corresponding
+        # datasets
+        for name in self.dataset:
+            ds = self._hdf_file.create_dataset(name,
+                                               self._global_resolution,
+                                               dtype=PARMES_REAL,
+                                               compression=compression)
+            # In parallel, each proc must write at the right place
+            # of the dataset --> use self._slices.
+            ds[self._sl] = npw.asrealarray(self.dataset[name][self._slices].T)
+
+        # Collect the data required to write the xdmf file
+        # --> append (counter, time) tuples.
+        self._xdmf_data_files.append((self._count, simu.time))
+        self._hdf_file.close()
+
+    def _step_HDF5_XMF(self, simu):
+        self._step_HDF5(simu)
+        self.createXMFFile()
+
+
+class HDF_Reader(HDF_IO):
+    """
+    Parallel reading of hdf/xdmf files to fill some fields in.
+    """
+    def __init__(self, restart=None, **kwds):
+        """
+        Read some fields data from hdf/xdmf files.
+        Parallel reads.
+        @param restart : set this to read a file corresponding to
+        a specific iteration.
+        See examples in tests_hdf_io.py
+        """
+        super(HDF_Reader, self).__init__(**kwds)
+        self.restart = restart
+        if self.restart is not None:
+            # filename = prefix_N, N = counter value
+            self._get_filename = lambda i=self.restart: \
+                self.io_params.filename + "_{0:05d}".format(i) + '.h5'
+        else:
+            self._get_filename = lambda i=None: self.io_params.filename
+
+    @debug
+    @timed_function
+    def apply(self, simulation=None):
+        # Read HDF file
+        self.open_hdf(count=self.restart, mode='r')
+
+        # Get the list of dataset names available in the hdf file
+        dsnames = self.dataset_names()
+        # And check if required dataset (from self.dataset)
+        # are all in this list.
+        msg = 'You are trying to read a dataset not present in the hdf file : '
+        for name in self.dataset:
+            assert name in dsnames, msg + name
+            # Read data
+            self.dataset[name][self._slices] = self._hdf_file[name][self._sl].T
+
+        self._hdf_file.close()
+        # Set to None to check if it is closed in finalize
+        self._hdf_file = None
+        # Do we need a CPU->GPU transfer here?
+
+    def dataset_names(self):
+        """
+        Return the list of available names for datasets in
+        the required file.
+        """
+        if self._hdf_file is None:
+            self.open_hdf(count=self.restart, mode='r')
+        return self._hdf_file.keys()
+
+    def finalize(self):
+        if self._hdf_file is not None:
+            self._hdf_file.close()
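A usage sketch tying the two classes together, following the var_names convention documented in HDF_IO; velo, vorti and simu are placeholders, and passing variables as a constructor keyword is an assumption based on the rest of this changeset:

    from parmepy.constants import HDF5
    from parmepy.tools.parameters import IO_params

    # Write all components of velo and vorti into fields_00000.h5,
    # fields_00001.h5, ... plus a fields.xmf index file.
    writer = HDF_Writer(variables=[velo, vorti],
                        io_params=IO_params('fields', fileformat=HDF5))
    writer.discretize()
    writer.setup()
    writer.apply(simu)
    writer.finalize()

    # Read datasets with prefix 'vel' (see the var_names doc above)
    # back into velo, from the file written at counter 0.
    reader = HDF_Reader(variables=[velo], var_names={velo: 'vel'},
                        restart=0)
    reader.discretize()
    reader.setup()
    reader.apply()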
diff --git a/HySoP/hysop/operator/monitors/__init__.py b/HySoP/hysop/operator/monitors/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0864718ff1a615f87055b381f04dd9620ed75b6
--- /dev/null
+++ b/HySoP/hysop/operator/monitors/__init__.py
@@ -0,0 +1,18 @@
+## @package parmepy.operator.monitors
+# Parmes tools for data and fields monitoring.
+#
+# import and alias so that monitors are
+# available with
+# from parmepy.operators.monitors import Printer, ...
+from parmepy.operator.monitors import printer, reader
+Printer = printer.Printer
+Reader = reader.Reader
+from parmepy.operator.monitors import compute_forces, reprojection_criterion
+DragAndLift = compute_forces.DragAndLift
+Reprojection_criterion = reprojection_criterion.Reprojection_criterion
+from parmepy.operator.monitors import energy_enstrophy
+Energy_enstrophy = energy_enstrophy.Energy_enstrophy
+
+# Set list for 'import *'
+__all__ = ['Reader', 'Printer', 'DragAndLift', 'Energy_enstrophy',
+           'Reprojection_criterion']
diff --git a/HySoP/hysop/operator/monitors/compute_forces.py b/HySoP/hysop/operator/monitors/compute_forces.py
deleted file mode 100644
index 701f711a8de71290d1c2fb47a0639d9d4d6d93b3..0000000000000000000000000000000000000000
--- a/HySoP/hysop/operator/monitors/compute_forces.py
+++ /dev/null
@@ -1,296 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-@file compute_forces.py
-Compute forces
-"""
-from parmepy.constants import np, XDIR, YDIR, ZDIR
-from parmepy.numerics.updateGhosts import UpdateGhosts
-from parmepy.numerics.differential_operations import Laplacian
-from parmepy.numerics.finite_differences import FD_C_2
-from parmepy.operator.monitors.monitoring import Monitoring
-import parmepy.tools.numpywrappers as npw
-
-
-class DragAndLift(Monitoring):
-    """
-    Compute drag and lift using Noca's formula.
-    See Noca99 or Plouhmans, 2002, Journal of Computational Physics
-    The present class implements formula (52) of Plouhmans2002.
-    Integral inside the obstacle is not taken into account.
-    """
-    def __init__(self, velocity, vorticity, nu, coefForce,
-                 volumeOfControl, obstacles=None, **kwds):
-        """
-        @param velocity field
-        @param vorticity field
-        @param nu viscosity
-        @param the topology on which forces will be computed
-        @param a volume of control
-        (parmepy.domain.obstacle.controlBox.ControlBox object)
-        @param obstacles a list of parmepy.domain.obstacles inside
-        the box
-        @param io_params : parameters (dict) to set file output.
-        If  None, no output. Set io_params = {} if you want output,
-        with default parameters values. Default file name = 'drag_and_lift'
-        See parmepy.tools.io_utils.Writer for details
-        """
-        if 'io_params' in kwds:
-            params = kwds['io_params']
-            if not "filename" in params:
-                params["filename"] = "drag_and_lift"
-            # Set output buffer shape
-            params["writebuffshape"] = (1, 4)
-        super(DragAndLift, self).__init__(variables=[velocity, vorticity],
-                                          **kwds)
-
-        self.velocity = velocity
-        self.vorticity = vorticity
-        self._voc = volumeOfControl
-        self._dim = self.domain.dimension
-        msg = 'Force computation undefined for domain of dimension 1.'
-        assert self._dim > 1, msg
-        if obstacles is None:
-            obstacles = []
-        ## A  list of obstacles (rigid bodies) in the control box
-        self.obstacles = obstacles
-        # Local buffers, used for time-derivative computation
-        self._previous = npw.zeros(self._dim)
-        self._buffer = npw.zeros(self._dim)
-        ## The computed forces
-        self.force = npw.zeros(self._dim)
-        # Coef in the Noca formula
-        self._coeff = 1. / (self._dim - 1)
-        # Set how reduction will be performed
-        # Default = reduction over all process.
-        # \todo : add param to choose this option
-        self.mpi_sum = self._mpi_allsum
-        # Ghost points synchronizer
-        self._synchronize = None
-        # discrete velocity field
-        self.vd = None
-        # discrete vorticity field
-        self.wd = None
-        ## viscosity
-        self.nu = nu
-        # Normalizing coefficient for forces
-        # (based on the physics of the flow)
-        self.coefForce = coefForce
-
-    def _mpi_allsum(self):
-        """
-        Performs MPI reduction (sum result value over all process)
-        All process get the result of the sum.
-        """
-        self.force = self.topology.comm.allreduce(self.force)
-
-    def _mpi_sum(self, root=0):
-        """
-        Performs MPI reduction (sum result value over all process)
-        Result send only to 'root' process.
-        @param root : number of the process which get the result.
-        """
-        self.force = self.topology.comm.reduce(self.force, root=root)
-
-    def setUp(self):
-        """
-        """
-        if not self._isUpToDate:
-            self._step = np.asarray(self.topology.mesh.space_step)
-            self._dvol = np.prod(self._step)
-            self._work = npw.zeros(self.topology.mesh.resolution)
-            assert (self.topology.ghosts >= 1).all()
-            # function to compute the laplacian of a
-            # scalar field. Default fd scheme. (See Laplacian)
-            self._laplacian = Laplacian(self.topology)
-            # function used to compute first derivative of
-            # a scalar field in a given direction.
-            # Default = FD_C_2. Todo : set this as an input method value.
-            self._fd_scheme = FD_C_2(self.topology.mesh.space_step)
-
-            for v in self.variables:
-                # the discrete fields
-                self.discreteFields[v] = v.discretize(self.topology)
-            self.vd = self.discreteFields[self.velocity]
-            self.wd = self.discreteFields[self.vorticity]
-            self._voc.discretize(self.topology)
-            for obs in self.obstacles:
-                obs.discretize(self.topology)
-            # prepare ghost points synchro for velocity and vorticity used
-            # in fd schemes
-            self._synchronize = UpdateGhosts(self.topology,
-                                             self.vd.nbComponents
-                                             + self.wd.nbComponents)
-            self._isUpToDate = True
-
-    def apply(self, simulation=None):
-        """
-        Perform integrals on volume and surfaces of the control box
-        @param parmepy.problem.simulation : object describing
-        simulation parameters
-        """
-        assert simulation is not None,\
-            "Simulation parameter is required for DragAndLift apply."
-        dt = simulation.timeStep
-        ite = simulation.currentIteration
-
-        # Synchro of ghost points is required for fd schemes
-        self._synchronize(self.vd.data + self.wd.data)
-
-        # -- Integration over the volume of control --
-        # -1/(N-1) . d/dt int(x ^ w)
-        if self._voc.isEmpty[self.topology]:
-            self._buffer[...] = 0.0
-            self.force[...] = 0.0
-        else:
-            self._buffer = self._integrateOnBox(self._buffer)
-            self.force[...] = -1. / dt * self._coeff * (self._buffer
-                                                        - self._previous)
-
-        # Update previous for next time step ...
-        self._previous[...] = self._buffer[...]
-        # -- Integrals on surfaces --
-        ## for s in self._voc.upperS:
-        ##     self._buffer = self._integrateOnSurface(s, self._buffer)
-        ##     self.force += self._buffer
-        ## for s in self._voc.lowerS:
-        ##     self._buffer = self._integrateOnSurface(s, self._buffer)
-        ##     self.force += self._buffer
-        self._buffer = self._integrateOnSurface(self._voc.upperS[0], self._buffer)
-        self.force += self._buffer
-        self._buffer = self._integrateOnSurface(self._voc.lowerS[0], self._buffer)
-        self.force += self._buffer
-
-        # Reduce results over all MPI process in topo
-        self.mpi_sum()
-
-        self.force *= self.coefForce
-
-        # Print results, if required
-        if self._writer is not None and self._writer.doWrite(ite):
-            self._writer.buffer[0, 0] = simulation.time
-            self._writer.buffer[0, 1:] = self.force
-            self._writer.write()
-
-        return self.force
-
-    def _integrateOnSurface(self, surf, res):
-
-        res[...] = 0.0
-
-        if surf.isEmpty[self.topology]:
-            return res
-
-        # Get normal of the surface
-        normal = surf.normal
-        # Get indices of the surface
-        sl = surf.slices[self.topology]
-        coords = surf.coords[self.topology]
-        vdata = self.vd.data
-        wdata = self.wd.data
-        # i1 : normal dir
-        # i2 : other dirs
-        i1 = np.where(normal)[0][0]
-        i2 = np.where(normal == 0)[0]
-        dsurf = np.prod(self.topology.mesh.space_step[i2])
-        # Indices used for cross-product
-        j1 = [YDIR, ZDIR, XDIR]
-        j2 = [ZDIR, XDIR, YDIR]
-
-        # i1 component
-        res[i1] = normal[i1] * 0.5 * np.sum((-vdata[i1][sl] ** 2
-                                             + sum([vdata[j][sl] ** 2
-                                                    for j in i2])))
-        # other components
-        for j in i2:
-            res[j] = -normal[i1] * np.sum(vdata[i1][sl] * vdata[j][sl])
-
-        # Second part of integral on surface ...
-        buff = npw.zeros(vdata[0][sl].shape)
-        for j in i2:
-            buff[...] = vdata[j1[j]][sl] * wdata[j2[j]][sl]\
-                - vdata[j2[j]][sl] * wdata[j1[j]][sl]
-            res[i1] -= self._coeff * normal[i1] * np.sum(coords[j] * buff)
-            res[j] -= self._coeff * normal[i1] * coords[i1] * np.sum(buff)
-
-        # Last part
-        # Update fd schemes in order to compute laplacian and other derivatives
-        # only on the surface (i.e. for list of indices in sl)
-        self._laplacian.fd_scheme.computeIndices(sl)
-        for j in i2:
-            self._work[...] = self._laplacian(vdata[j], self._work)
-            res[i1] += self._coeff * self.nu * normal[i1]\
-                * np.sum(coords[j] * self._work[sl])
-            res[j] -= self._coeff * self.nu * normal[i1] * coords[i1] * \
-                np.sum(self._work[sl])
-        self._fd_scheme.computeIndices(sl)
-        self._fd_scheme.compute(vdata[i1], i1, self._work)
-        res[i1] += 2.0 * normal[i1] * self.nu * np.sum(self._work[sl])
-        for j in i2:
-            self._fd_scheme.compute(vdata[i1], j, self._work)
-            res[j] += normal[i1] * self.nu * np.sum(self._work[sl])
-            self._fd_scheme.compute(vdata[j], i1, self._work)
-            res[j] += normal[i1] * self.nu * np.sum(self._work[sl])
-
-        res *= dsurf
-        return res
-
-    def _integrateOnBox(self, res):
-        assert self._dim == 3, 'Not defined for dim < 3'
-        coords = self._voc.coords[self.topology]
-        wdata = self.wd.data
-        i1 = [YDIR, ZDIR, XDIR]
-        i2 = [ZDIR, XDIR, YDIR]
-        direc = 0
-        sl = self._voc.slices[self.topology]
-        for (i, j) in zip(i1, i2):
-            self._work[sl] = coords[i] * wdata[j][sl]
-            self._work[sl] -= coords[j] * wdata[i][sl]
-            for obs in self.obstacles:
-                for inds in obs.ind[self.topology]:
-                    self._work[inds] = 0.0
-            res[direc] = np.sum(self._work[sl])
-            direc += 1
-        res *= self._dvol
-        return res
-
-    def _integrateOnBox2(self, res):
-        assert self._dim == 3, 'Not defined for dim < 3'
-        coords = self.topology.mesh.coords
-        wdata = self.wd.data
-        i1 = [YDIR, ZDIR, XDIR]
-        i2 = [ZDIR, XDIR, YDIR]
-        direc = 0
-        ind = self._voc.ind[self.topology][0]
-        ilist = np.where(ind)
-        nb = len(ilist[0])
-        ind = self._voc.ind[self.topology][0]
-        for (i, j) in zip(i1, i2):
-            self._work.flat[:nb] = coords[i].flat[ilist[i]] * wdata[j][ind]\
-                - coords[j].flat[ilist[j]] * wdata[i][ind]
-            res[direc] = np.sum(self._work.flat[:nb])
-            direc += 1
-        res *= self._dvol
-        return res
-
-    def _integrateOnBoxLoop(self, res):
-        """
-        Integrate over the control box using python loops.
-        ---> wrong way, seems to be really slower although
-        it costs less in memory.
-        Used only for tests (timing).
-        """
-        assert self._dim == 3, 'Not defined for dim < 3'
-        coords = self.topology.mesh.coords
-        ind = self._voc.ind[self.topology][0]
-        ilist = np.where(ind)
-        wdata = self.wd.data
-        for(ix, iy, iz) in zip(ilist[0], ilist[YDIR], ilist[ZDIR]):
-            res[XDIR] += coords[YDIR][0, iy, 0] * wdata[ZDIR][ix, iy, iz]\
-                - coords[ZDIR][0, 0, iz] * wdata[YDIR][ix, iy, iz]
-            res[YDIR] += coords[ZDIR][0, 0, iz] * wdata[XDIR][ix, iy, iz]\
-                - coords[XDIR][ix, 0, 0] * wdata[ZDIR][ix, iy, iz]
-            res[ZDIR] += coords[XDIR][ix, 0, 0] * wdata[YDIR][ix, iy, iz]\
-                - coords[YDIR][0, iy, 0] * wdata[XDIR][ix, iy, iz]
-
-        res *= self._dvol
-        return res
diff --git a/HySoP/hysop/operator/monitors/compute_forces.pyc b/HySoP/hysop/operator/monitors/compute_forces.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2a09c37f9fcade949f2e465e659ae1b2d891957f
Binary files /dev/null and b/HySoP/hysop/operator/monitors/compute_forces.pyc differ
diff --git a/HySoP/hysop/operator/monitors/energy_enstrophy.py b/HySoP/hysop/operator/monitors/energy_enstrophy.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5998ee833bdc0560d9cc754df0f47e1aebbcef5
--- /dev/null
+++ b/HySoP/hysop/operator/monitors/energy_enstrophy.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+"""
+@file energy_enstrophy.py
+Compute Energy and Enstrophy
+"""
+from parmepy.operator.discrete.energy_enstrophy import EnergyEnstrophy as DEE
+from parmepy.operator.computational import Computational
+
+
+class EnergyEnstrophy(Computational):
+    """
+    Computes enstrophy and the kinetic energy
+    \f{eqnarray*}
+    enstrophy = \frac{1}{\Omega}\int_\Omega \omega^2 d\Omega
+    \f} with \f$\Omega\f$ the volume or surface of the physical domain
+    \f$\omega\f$ the vorticity and
+    \f{eqnarray*}
+    energy = \frac{1}{2\Omega}\int_\Omega v^2 d\Omega
+    \f}
+    """
+
+    def __init__(self, velocity, vorticity, isNormalized=True, **kwds):
+        """
+        Constructor.
+        @param velocity field
+        @param vorticity field
+        @param isNormalized : boolean indicating whether the enstrophy
+        and energy values have to be normalized by the domain lengths.
+
+        Default file name = 'energy_enstrophy.dat'
+        See parmepy.tools.io_utils.Writer for details
+        """
+        assert 'variables' not in kwds, 'variables parameter is useless.'
+        super(EnergyEnstrophy, self).__init__(variables=[velocity, vorticity],
+                                              **kwds)
+        ## velocity field
+        self.velocity = velocity
+        ## vorticity field
+        self.vorticity = vorticity
+        ## are the energy and enstrophy values normalized by domain lengths?
+        self.is_normalized = isNormalized
+        self.input = [velocity, vorticity]
+        self.output = []
+
+    def get_work_properties(self):
+        if not self._is_discretized:
+            msg = 'The operator must be discretized '
+            msg += 'before any call to this function.'
+            raise RuntimeError(msg)
+        vd = self.discreteFields[self.velocity]
+        wd = self.discreteFields[self.vorticity]
+        v_ind = vd.topology.mesh.iCompute
+        w_ind = wd.topology.mesh.iCompute
+        shape_v = vd[0][v_ind].shape
+        shape_w = wd[0][w_ind].shape
+        if shape_v == shape_w:
+            return {'rwork': [shape_v], 'iwork': None}
+        else:
+            return {'rwork': [shape_v, shape_w], 'iwork': None}
+
+    def setup(self, rwork=None, iwork=None):
+        if not self._is_uptodate:
+
+            self.discreteOperator = DEE(self.discreteFields[self.velocity],
+                                        self.discreteFields[self.vorticity],
+                                        self.is_normalized,
+                                        rwork=rwork)
+            # Output setup
+            self._set_io('energy_enstrophy', (1, 3))
+            self.discreteOperator.setWriter(self._writer)
+            self._is_uptodate = True
+
+    def energy(self):
+        """
+        Return last computed value of the energy
+        """
+        return self.discreteOperator.energy
+
+    def enstrophy(self):
+        """
+        Return last computed value of the enstrophy
+        """
+        return self.discreteOperator.enstrophy
diff --git a/HySoP/hysop/operator/monitors/energy_enstrophy.pyc b/HySoP/hysop/operator/monitors/energy_enstrophy.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c9829535c151bb8782662fd5454751994470cfca
Binary files /dev/null and b/HySoP/hysop/operator/monitors/energy_enstrophy.pyc differ
diff --git a/HySoP/hysop/operator/monitors/enerref b/HySoP/hysop/operator/monitors/enerref
new file mode 100644
index 0000000000000000000000000000000000000000..e029450b90495c80b611464607e455aa4e979d0d
--- /dev/null
+++ b/HySoP/hysop/operator/monitors/enerref
@@ -0,0 +1,148 @@
+# -*- coding: utf-8 -*-
+"""
+@file energy_enstrophy.py
+Compute Energy and Enstrophy
+"""
+import numpy as np
+from parmepy.constants import debug, XDIR
+from parmepy.operator.monitors.monitoring import Monitoring
+from parmepy.tools.timers import timed_function
+import parmepy.tools.numpywrappers as npw
+
+
+class Energy_enstrophy(Monitoring):
+    """
+    Computes enstrophy and the kinetic energy
+    \f{eqnarray*}
+    enstrophy = \frac{1}{\Omega}\int_\Omega \omega^2 d\Omega
+    \f} with \f$\Omega\f$ the volume or surface of the physical domain
+    \f$\omega\f$ the vorticity and
+    \f{eqnarray*}
+    energy = \frac{1}{2\Omega}\int_\Omega v^2 d\Omega
+    \f}
+    """
+
+    def __init__(self, velocity, vorticity,
+                 viscosity, isNormalized, **kwds):
+        """
+        Constructor.
+        @param velocity field
+        @param vorticity field
+        @param viscosity : kinematic viscosity
+        @param isNormalized : boolean indicating whether the enstrophy
+        and energy values have to be normalized by the domain lengths.
+        @param topo : the topology on which we want to monitor the fields
+        Default file name = 'energy_enstrophy.dat'
+        See parmepy.tools.io_utils.Writer for details
+        """
+        # Set output file parameter
+        if 'io_params' in kwds:
+            params = kwds['io_params']
+            if 'filename' not in params:
+                params['filename'] = 'energy_enstrophy'
+            # Set output buffer shape
+            params['writebuffshape'] = (1, 3)
+
+        assert 'variables' not in kwds, 'variables parameter is useless.'
+        super(Energy_enstrophy, self).__init__(variables=[velocity, vorticity],
+                                               **kwds)
+        ## velocity field
+        self.velocity = velocity
+        ## vorticity field
+        self.vorticity = vorticity
+        ## viscosity (scalar)
+        self.viscosity = viscosity
+        ## are the energy and enstrophy values normalized by domain lengths?
+        self.isNormalized = isNormalized
+        ## self._buffer_1 = 0.
+        ## self._buffer_2 = 0.
+        self.input = [velocity, vorticity]
+        self.output = []
+        # \todo : rewrite for multiresolution case.
+        # Note FP : for multiresolution case, it would probably be
+        # better to use two different operators for energy and enstrophy.
+
+    def setUp(self):
+        if not self._isUpToDate:
+            self.discreteFields[self.velocity] = \
+                self.velocity.discretization(self.topology)
+            self.discreteFields[self.vorticity] =\
+                self.vorticity.discretization(self.topology)
+
+            spaceStep = self.topology.mesh.space_step
+            length = self.topology.domain.length
+            if self.isNormalized:
+                self.coeffEnstrophy = (np.prod(spaceStep) /
+                                       np.prod(length))
+            else:
+                self.coeffEnstrophy = np.prod(spaceStep)
+            self.coeffEnergy = 0.5 * self.coeffEnstrophy
+
+            # A work vector for local computations
+            # Warning : we assume one topo for all variables
+            self.ind = self.topology.mesh.iCompute
+            shape = self.discreteFields[self.velocity].data[0][self.ind].shape
+            self._work = npw.zeros(shape)
+            self._isUpToDate = True
+
+    @debug
+    @timed_function
+    def apply(self, simulation=None):
+        """
+        Computation of kinetic energy, enstrophy &
+        Checking energy and enstrophy decay
+        """
+        if simulation is None:
+            raise ValueError("Missing simulation value for computation.")
+
+        #time = MPI.Wtime()
+
+        # --- Kinetic energy computation ---
+        vd = self.discreteFields[self.velocity]
+        # get the list of computation points (no ghosts)
+        nbc = vd.nbComponents
+        # Integrate (locally) velocity ** 2
+        self._work[...] = vd[XDIR][self.ind] ** 2
+        [np.add(self._work[...], vd[i][self.ind] ** 2, self._work[...])
+         for i in xrange(1, nbc)]
+        local_energy = np.sum(self._work)
+
+        # --- Enstrophy computation ---
+        vortd = self.discreteFields[self.vorticity]
+        nbc = vortd.nbComponents
+        # Integrate (locally) vorticity ** 2
+        self._work[...] = vortd[0][self.ind] ** 2
+        [np.add(self._work[...], vortd[i][self.ind] ** 2, self._work[...])
+         for i in xrange(1, nbc)]
+        local_enstrophy = np.sum(self._work)
+
+        # --- Reduce energy and enstrophy values overs all proc ---
+        # two ways : numpy or classical. Todo : check perf and comm
+        sendbuff = npw.zeros((2))
+        recvbuff = npw.zeros((2))
+        sendbuff[:] = [local_energy, local_enstrophy]
+        self.topology.comm.Allreduce(sendbuff, recvbuff)
+        # the other way :
+        #energy = self._topovel.topo.allreduce(local_energy, PARMES_MPI_REAL,
+        #                                     op=MPI.SUM)
+        #enstrophy = self._topovel.topo.allreduce(local_enstrophy,
+        #                                        PARMES_MPI_REAL,
+        #                                        op=MPI.SUM)
+
+        # Update global values
+        energy = recvbuff[0] * self.coeffEnergy
+        enstrophy = recvbuff[1] * self.coeffEnstrophy
+
+        # Update buffers
+        ## energyBuff1 = self._buffer_1
+        ## energyBuff2 = self._buffer_2
+        ## self._buffer_2 = self._buffer_1
+        ## self._buffer_1 = energy
+
+        # Print results, if required
+        ite = simulation.currentIteration
+        if self._writer is not None and self._writer.doWrite(ite):
+            self._writer.buffer[0, 0] = simulation.time
+            self._writer.buffer[0, 1] = energy
+            self._writer.buffer[0, 2] = enstrophy
+            self._writer.write()
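+
+# The reduction above follows a standard pattern : each process sums its
+# local integrand, then a single Allreduce combines both scalars at once.
+# A minimal, self-contained sketch of that pattern, using mpi4py and plain
+# numpy instead of the parmepy topology wrappers :
+#
+#     import numpy as np
+#     from mpi4py import MPI
+#
+#     def reduce_energy_enstrophy(local_energy, local_enstrophy):
+#         sendbuff = np.asarray([local_energy, local_enstrophy])
+#         recvbuff = np.zeros(2)
+#         # default reduction op is MPI.SUM : recvbuff holds global sums
+#         MPI.COMM_WORLD.Allreduce(sendbuff, recvbuff)
+#         return recvbuff[0], recvbuff[1]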
diff --git a/HySoP/hysop/operator/monitors/monitoring.py b/HySoP/hysop/operator/monitors/monitoring.py
new file mode 100644
index 0000000000000000000000000000000000000000..2face3b0f149c7b0f6750baeefd18181b1c65b30
--- /dev/null
+++ b/HySoP/hysop/operator/monitors/monitoring.py
@@ -0,0 +1,54 @@
+"""
+@file monitoring.py
+Global interface for monitors.
+"""
+from abc import ABCMeta, abstractmethod
+from parmepy.operator.computational import Computational
+import parmepy.tools.io_utils as io
+from parmepy.tools.parameters import IO_params
+
+
+class Monitoring(Computational):
+    """Abstract interface to monitoring operators."""
+
+    __metaclass__ = ABCMeta
+
+    @abstractmethod
+    def __init__(self, **kwds):
+        """ Constructor
+        @param variables : list of fields to monitor.
+        @param topo : topo on which fields are to be monitored
+        @param filename : output file name. Default is None ==> no output.
+        Full or relative path.
+        @param io_params : parameters (dict) to set file output.
+        If None, no output. Set io_params = {} if you want output
+        with default parameter values.
+        See parmepy.tools.io_utils.Writer for details
+        """
+        super(Monitoring, self).__init__(**kwds)
+        ## Object to store computational times of lower level functions
+
+    def discretize(self):
+        # All variables of a monitor must be already discretized
+        # So we just check if topologies have been properly
+        # associated with fields.
+        super(Monitoring, self)._standard_discretize()
+
+    def setup(self):
+        """
+        Class-method required
+        to fit with operator base-class interface.
+        """
+        self._is_uptodate = True
+
+    def finalize(self):
+        if self._writer is not None:
+            self._writer.finalize()
+
+    def __str__(self):
+        """
+        Common printings for operators
+        """
+        shortName = str(self.__class__).rpartition('.')[-1][0:-2]
+        s = shortName + " Monitor."
+        return s + "\n"
diff --git a/HySoP/hysop/operator/monitors/printer.py b/HySoP/hysop/operator/monitors/printer.py
new file mode 100644
index 0000000000000000000000000000000000000000..280e64a8be6d4e9d8cd55cf9d90e02e0f34028f4
--- /dev/null
+++ b/HySoP/hysop/operator/monitors/printer.py
@@ -0,0 +1,277 @@
+"""
+@file printer.py
+
+File output for field(s) values on a grid.
+"""
+from parmepy.constants import S_DIR, debug, HDF5, ASCII, PARMES_REAL
+from parmepy.operator.monitors.monitoring import Monitoring
+import parmepy.tools.numpywrappers as npw
+import parmepy.tools.io_utils as io
+import os
+
+try:
+    import h5py
+except ImportError as h5py_error:
+    h5py = None
+    msg = 'Warning, h5py not found, you may not be able to'
+    msg += ' use hdf5 I/O functionalities.'
+    print msg
+
+from parmepy.tools.timers import timed_function
+
+
+class Printer(Monitoring):
+    """
+    Print field(s) values on a given topo, in HDF5 format.
+    """
+    def __init__(self, prefix=None, frequency=1, formattype=None,
+                 xmfalways=False, subset=None, **kwds):
+        """
+        Create a results printer for given fields, a given prefix
+        (relative path) and an output frequency.
+
+        @param variables : list of variables to export.
+        @param frequency : output rate (output every freq iterations)
+        @param prefix : output file name. Default is None ==> no output.
+        Full or relative path.
+        @param formattype : output file format, default=hdf5.
+        @param xmfalways : true if an xmf output must be done every time
+        an hdf5 file is created
+        """
+        super(Printer, self).__init__(**kwds)
+
+        assert frequency > 0
+        self.frequency = frequency
+        ## Default output type
+        if formattype is None:
+            # default : hdf5 when available, else ascii
+            self.formattype = HDF5 if h5py is not None else ASCII
+        else:
+            self.formattype = formattype
+
+        if self.formattype == HDF5 and h5py is None:
+            msg = 'You set a printer with HDF5 format but the h5py module '
+            msg += 'is not available. Use ASCII output instead.'
+            print msg
+            raise h5py_error
+
+        self._topology = None
+
+        self.input = self.variables
+        self.output = []
+        # If no prefix is given, set it to
+        # the concatenation of variables' names.
+        if prefix is None:
+            prefix = io.io.default_path()
+            name = ''
+            for var in self.input:
+                name += var.name
+                name += '_'
+            prefix = os.path.join(prefix, name)
+        else:
+            if not os.path.isabs(prefix):
+                prefix = os.path.join(io.io.default_path(), prefix)
+        self.prefix = prefix
+        self._xmf_data_files = []
+        # count the number of calls
+        self._count = 0
+        ## Rank of 'leader' mpi process for output
+        ## Forced to 0. \todo : add a param for this?
+        self.io_rank = 0
+
+        ## Set a subset of the original domain, to reduce
+        ## output
+        self.subset = subset
+        if self.formattype == HDF5:
+            self.step = self._step_HDF5
+            if xmfalways:
+                self.step = self._step_HDF5_XMF
+        elif self.formattype == ASCII:
+            self.step = self._step_DATA
+
+    def setup(self):
+        assert self._single_topo, 'Multi-resolution case not implemented.'
+        self._topology = self.variables.values()[0]
+        io.io.check_dir(self.prefix, 0, self._topology.comm)
+
+        if self.formattype == HDF5:
+            # filename = prefix_N, N = counter value
+            self._get_filename = lambda i: self.prefix + \
+                "_{0:05d}".format(i) + '.h5'
+        elif self.formattype == ASCII:
+            self._get_filename = lambda i: self.prefix + \
+                "_{0}_{1:05d}.dat".format(self._topology.rank, i)
+
+        if self.subset is not None:
+            self.subset.discretize(self._topology)
+            self._slices = self.subset.slices[self._topology]
+            # Global resolution for hdf5 output
+            self.globalResolution = \
+                self.subset.globalResolution(self._topology)
+        else:
+            self.globalResolution = \
+                list(self._topology.globalMeshResolution - 1)
+            self._slices = self._topology.mesh.iCompute
+        self.globalResolution.reverse()
+
+        self._is_uptodate = True
+
+    @debug
+    @timed_function
+    def apply(self, simulation=None):
+        if simulation is None:
+            raise ValueError("Missing simulation value for monitoring.")
+
+        if simulation.currentIteration == -1 or \
+                simulation.currentIteration % self.frequency == 0:
+            # Transfer from GPU to CPU if required
+            for v in self.variables:
+                df = self.discreteFields[v]
+                try:
+                    if not df.isBatch:
+                        # To host only if data fit in the device memory
+                        df.toHost()
+                        df.wait()
+                except AttributeError:
+                    pass
+            self.step(simulation)
+            self._count += 1
+
+    def createXMFFile(self):
+        if self.formattype == HDF5 and self._topology.rank == self.io_rank:
+            # Write the xmf file driving all h5 files.
+            # Writing only one file
+            # We have a temporal list of Grid => a single xmf file
+            # Manual writing of the xmf file because "XDMF library very
+            # difficult to compile and to use"
+            #  [Advanced HDF5 & XDMF - Groupe Calcul]
+            f = open(self.prefix + '.xmf', 'w')
+            f.write("<?xml version=\"1.0\" ?>\n")
+            f.write("<!DOCTYPE Xdmf SYSTEM \"Xdmf.dtd\">\n")
+            f.write("<Xdmf Version=\"2.0\">\n")
+            f.write(" <Domain>\n")
+            f.write("  <Grid Name=\"CellTime\" GridType=\"Collection\" ")
+            f.write("CollectionType=\"Temporal\">\n")
+            for ds_names, i, t in self._xmf_data_files:
+                f.write(io.XMF.write_grid_attributes(
+                    self._topology, ds_names, i, t, self._get_filename(i),
+                    self.subset))
+            f.write("  </Grid>\n")
+            f.write(" </Domain>\n")
+            f.write("</Xdmf>\n")
+            f.close()
+
+    def finalize(self):
+        self.createXMFFile()
+        Monitoring.finalize(self)
+
+    def _step_HDF5(self, simu):
+        filename = self._get_filename(self._count)
+        # Write the h5 file
+        # (force np.float64; ParaView seems unable to read float32)
+        # Writing compressed hdf5 files (gzip compression seems the best)
+        # In parallel, a single file is written, so the filename no longer
+        # contains the mpi rank.
+        # Remark : gzip compression does not work in parallel.
+        # Remark : h5py must be built with the --mpi option.
+        if self._topology.size == 1:
+            f = h5py.File(filename, "w")
+            compression = 'gzip'
+        else:
+            f = h5py.File(filename, 'w', driver='mpio',
+                          comm=self._topology.comm)
+            compression = None
+        # It's necessary to compute the set of indices of the current subset
+        # in global notation
+        if self.subset is None:
+            # Note : g_start and g_end do not include ghost points.
+            g_start = self._topology.mesh.global_start
+            g_end = self._topology.mesh.global_end + 1
+            sl = [slice(g_start[i], g_end[i])
+                  for i in xrange(self.domain.dimension)]
+        else:
+            g_start = self.subset.gstart
+            # convert self._slices to global position in topo
+            sl = self._topology.toIndexGlobal(self._slices)
+            # And shift using global position of the surface
+            sl = [slice(sl[i].start - g_start[i], sl[i].stop - g_start[i])
+                  for i in xrange(self.domain.dimension)]
+        sl.reverse()
+        sl = tuple(sl)
+        datasetNames = []
+        for field in self.variables:
+            df = field.discreteFields[self._topology]
+            for d in xrange(df.nbComponents):
+                # creating datasets for the vector field
+                currentName = df.name + S_DIR[d]
+                datasetNames.append(currentName)
+                ds = f.create_dataset(currentName,
+                                      self.globalResolution,
+                                      dtype=PARMES_REAL,
+                                      compression=compression)
+                # In parallel, each process must write into its own part
+                # of the dataset (of size the global resolution).
+                ds[sl] = npw.asrealarray(df.data[d][self._slices].T)
+
+        self._xmf_data_files.append((datasetNames, self._count, simu.time))
+
+        f.close()
+
+    def _step_HDF5_XMF(self, simu):
+        self._step_HDF5(simu)
+        self.createXMFFile()
+
+    def _step_DATA(self, simu):
+        f = open(self._get_filename(self._count), 'w')
+        shape = self._topology.mesh.resolution
+        coords = self._topology.mesh.coords
+        pbDimension = self.domain.dimension
+        if pbDimension == 2:
+            if len(shape) == 2:
+                for i in xrange(shape[0] - 1):
+                    for j in xrange(shape[1] - 1):
+                        f.write("{0:8.12} {1:8.12} ".format(
+                            coords[0][i, 0], coords[1][0, j]))
+                        for field in self.variables:
+                            df = field.discreteFields[self._topology]
+                            if field.isVector:
+                                f.write("{0:8.12} {1:8.12} ".format(
+                                    df[0][i, j], df[1][i, j]))
+                            else:
+                                f.write("{0:8.12} ".format(
+                                    df[0][i, j]))
+                        f.write("\n")
+        elif pbDimension == 3:
+            for i in xrange(shape[0] - 1):
+                for j in xrange(shape[1] - 1):
+                    for k in xrange(shape[2] - 1):
+                        f.write(
+                            "{0:8.12} {1:8.12} {2:8.12} ".format(
+                                coords[0][i, 0, 0],
+                                coords[1][0, j, 0],
+                                coords[2][0, 0, k]))
+                        for field in self.variables:
+                            df = field.discreteFields[self._topology]
+                            if field.isVector:
+                                # format must apply to the whole
+                                # concatenated string
+                                f.write(
+                                    ("{0:8.12} {1:8.12} "
+                                     "{2:8.12} ").format(
+                                         df[0][i, j, k],
+                                         df[1][i, j, k],
+                                         df[2][i, j, k]))
+                            else:
+                                f.write("{0:8.12} ".format(
+                                    df[0][i, j, k]))
+                        f.write("\n")
+        else:
+            for i in xrange(shape[0] - 1):
+                f.write("{0:8.12} ".format(coords[0][i]))
+                for field in self.variables:
+                    df = field.discreteFields[self._topology]
+                    if field.isVector:
+                        f.write(
+                            "{0:8.12} ".format(df[0][i]))
+                    else:
+                        f.write("{0:8.12} ".format(df[0][i]))
+                f.write("\n")
+        f.close()
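+
+# The parallel write in _step_HDF5 relies on h5py's 'mpio' driver : every
+# process opens the same file, datasets are created with the *global*
+# shape, and each process assigns only its own slab. A standalone sketch
+# of that pattern (file and dataset names are illustrative) :
+#
+#     import numpy as np
+#     import h5py
+#     from mpi4py import MPI
+#
+#     comm = MPI.COMM_WORLD
+#     rank, size = comm.Get_rank(), comm.Get_size()
+#     nloc = 8  # local number of points along the split direction
+#     f = h5py.File('velo_00000.h5', 'w', driver='mpio', comm=comm)
+#     ds = f.create_dataset('velocity_X', (size * nloc,), dtype=np.float64)
+#     ds[rank * nloc:(rank + 1) * nloc] = np.full(nloc, rank,
+#                                                 dtype=np.float64)
+#     f.close()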
diff --git a/HySoP/hysop/operator/monitors/printer.pyc b/HySoP/hysop/operator/monitors/printer.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..406d3356ffce11c79f65f239f138efbb564d1e41
Binary files /dev/null and b/HySoP/hysop/operator/monitors/printer.pyc differ
diff --git a/HySoP/hysop/operator/monitors/reader.py b/HySoP/hysop/operator/monitors/reader.py
new file mode 100644
index 0000000000000000000000000000000000000000..2148063a18694cc9ab11eaa19bcb338c9c265ea3
--- /dev/null
+++ b/HySoP/hysop/operator/monitors/reader.py
@@ -0,0 +1,141 @@
+"""
+@file reader.py
+
+File input for field(s) values on a grid.
+"""
+from parmepy.constants import debug, HDF5
+from parmepy.operator.monitors.monitoring import Monitoring
+import parmepy.tools.io_utils as io
+import os
+
+try:
+    import h5py
+except ImportError as h5py_error:
+    h5py = None
+from parmepy.tools.timers import timed_function
+
+
+class Reader(Monitoring):
+    """
+    Read field(s) values from an HDF5 file onto a given topo.
+    """
+    def __init__(self, prefix, formattype=None, subset=None, names=None,
+                 **kwds):
+        """
+        @param variables : list of variables to read.
+        @param prefix : input file name (without extension)
+        @param formattype : input file format, default=HDF5.
+        """
+        super(Reader, self).__init__(**kwds)
+        ## Default output type
+        if formattype is None:
+            self.formattype = HDF5
+        else:
+            self.formattype = formattype
+        if self.formattype != HDF5:
+            raise ValueError("Format not allowed : only HDF5 "
+                             "readers are implemented.")
+        if self.formattype == HDF5 and h5py is None:
+            print ("You set a printer with HDF5 as format and h5py module ",)
+            print ("is not present. You must specify another extension",)
+            print (" (DATA or VTK)")
+            raise h5py_error
+
+        self.input = self.variables
+        self.output = self.variables
+        if not os.path.isabs(prefix):
+            prefix = os.path.join(io.io.default_path(), prefix)
+
+        if self.topology is not None:
+            io.io.check_dir(prefix, 0, self.topology.comm)
+        self.prefix = prefix
+        self._is_uptodate = True
+        ## Set a subset of the original domain, to reduce
+        ## output
+        self.subset = subset
+        if self.subset is not None:
+            self.subset.discretize(self.topology)
+            self._slices = self.subset.slices[self.topology]
+            # Global resolution for hdf5 output
+            self.globalResolution = self.subset.globalResolution(self.topology)
+        else:
+            self.globalResolution = list(self.topology.globalMeshResolution - 1)
+            self._slices = self.topology.mesh.iCompute
+        self.globalResolution.reverse()
+        self.step = self.readHDF5
+        filename = self.prefix + '.h5'
+        assert os.path.isfile(filename), 'error, file does not exist'
+        if self.topology.size == 1:
+            self._file = h5py.File(filename, "r")
+        else:
+            self._file = h5py.File(filename, 'r', driver='mpio',
+                                   comm=self.topology.comm)
+        self._globalSlice = []
+        if self.subset is None:
+            # Note : g_start and g_end do not include ghost points.
+            g_start = self.topology.mesh.global_start
+            g_end = self.topology.mesh.global_end + 1
+            self._globalSlice = [slice(g_start[i], g_end[i])
+                                 for i in xrange(self.domain.dimension)]
+        else:
+            g_start = self.subset.gstart
+            # convert self._slices to global position in topo
+            self._globalSlice = self.topology.toIndexGlobal(self._slices)
+            # And shift using global position of the surface
+            self._globalSlice = [slice(self._globalSlice[i].start - g_start[i],
+                                       self._globalSlice[i].stop - g_start[i])
+                                 for i in xrange(self.domain.dimension)]
+        self._globalSlice.reverse()
+        self._globalSlice = tuple(self._globalSlice)
+        if names is None:
+            self.names = {}
+            names = self.dataset_names()
+            i = 0
+            for field in self.variables:
+                self.names[field] = []
+                for d in xrange(field.nbComponents):
+                    self.names[field].append(names[i])
+                    i += 1
+        else:
+            self.names = names
+            for field in self.variables:
+                fname = self.names[field]
+                self.names[field] = [v for v in self.dataset_names()
+                                     if fname in v]
+                # assert len(self.names[field]) == field.nbComponents
+
+    @debug
+    @timed_function
+    def apply(self, simulation=None):
+        self.step()
+
+    def dataset_names(self):
+        """
+        Return the list of available names for datasets in
+        the required file.
+        """
+        return self._file.keys()
+
+    def readHDF5(self):
+        # It's necessary to compute the set of indices of the current subset
+        # in global notation
+        for field in self.variables:
+            df = field.discreteFields[self.topology]
+            for d in xrange(df.nbComponents):
+                # get the dataset for each component of the field
+                #currentName = df.name + S_DIR[d]
+                #if not currentName in f.iterkeys():
+                #    raise ValueError("The required field name is \
+                #        not in HDF5 file.")
+                # Note FP : temp method --> use the first name in the dataset
+                # as data for the field. Todo : set a list of names
+                # to be read.
+                currentName = self.names[field][d]
+                ds = self._file[currentName]
+                # In parallel, each process must read from its own part
+                # of the dataset (of size the global resolution).
+                df.data[d][self._slices] = ds[self._globalSlice].T
+
+    def finalize(self):
+        Monitoring.finalize(self)
+        self._file.close()
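+
+# Reading mirrors the printer's parallel write : the dataset is opened
+# with its global shape and each process extracts its own slab into the
+# ghost-free part of the local discrete field. A sketch with the same
+# illustrative names as in printer.py :
+#
+#     f = h5py.File('velo_00000.h5', 'r', driver='mpio', comm=comm)
+#     ds = f['velocity_X']
+#     local_data = ds[rank * nloc:(rank + 1) * nloc]
+#     f.close()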
diff --git a/HySoP/hysop/operator/monitors/reprojection_criterion.pyc b/HySoP/hysop/operator/monitors/reprojection_criterion.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..22b1ca53663232754fdc439a48941819f2b86545
Binary files /dev/null and b/HySoP/hysop/operator/monitors/reprojection_criterion.pyc differ
diff --git a/HySoP/hysop/operator/penalization.py b/HySoP/hysop/operator/penalization.py
index 87826866ac8074f9623470dd59b4bc736b9e98b2..4de8f9c660c2ffc1d34b6e40d2bca0850d1be94c 100644
--- a/HySoP/hysop/operator/penalization.py
+++ b/HySoP/hysop/operator/penalization.py
@@ -4,12 +4,21 @@
 
 Penalisation of a given field
 """
+<<<<<<< HEAD
 from parmepy.operator.continuous import Operator
 from parmepy.operator.discrete.penalization import Penalization_d
 from parmepy.constants import debug
 
 
 class Penalization(Operator):
+=======
+from parmepy.operator.computational import Computational
+from parmepy.operator.discrete.penalization import Penalization as DiscrPenal
+from parmepy.constants import debug
+
+
+class Penalization(Computational):
+>>>>>>> Parmepy v0
     """
     Solves
     \f{eqnarray*}
@@ -24,12 +33,18 @@ class Penalization(Operator):
     @debug
     def __init__(self, obstacles, coeff, **kwds):
         """
+<<<<<<< HEAD
         Constructor.
         @param[in,out] variables : list of fields to be penalized
         @param[in] obstacle : list of domains where p
         enalization must be applied.
         @param[in] resolutions :  list of resolutions (one per variable)
 
+=======
+        @param[in] obstacle : list of domains where
+        penalization must be applied.
+        @param[in] coeff : the penalization factor
+>>>>>>> Parmepy v0
         """
         super(Penalization, self).__init__(**kwds)
 
@@ -42,6 +57,7 @@ class Penalization(Operator):
             self.obstacles = [obstacles]
         ## lambda penalization function
         self.coeff = coeff
+<<<<<<< HEAD
         self.resolution = self.resolutions.values()[0]
         # all variables must have the same resolution
         for resol in self.resolutions.values():
@@ -69,3 +85,19 @@ class Penalization(Operator):
 
         self.discreteOperator.setUp()
         self._isUpToDate = True
+=======
+        self.input = self.output = self.variables
+
+    def discretize(self):
+        super(Penalization, self)._standard_discretize()
+        # all variables must have the same resolution
+        assert self._single_topo, 'multi-resolution case not allowed.'
+
+    @debug
+    def setup(self, rwork=None, iwork=None):
+        self.discreteOperator = DiscrPenal(
+            variables=self.discreteFields.values(), obstacles=self.obstacles,
+            factor=self.coeff, rwork=rwork, iwork=iwork)
+
+        self._is_uptodate = True
+>>>>>>> Parmepy v0
diff --git a/HySoP/hysop/operator/poisson.py b/HySoP/hysop/operator/poisson.py
index ad6b4a848e772b06565d7a888148e89dfde43b26..269aa2aff55a372bdb6f6d021f62ae1b87584479 100644
--- a/HySoP/hysop/operator/poisson.py
+++ b/HySoP/hysop/operator/poisson.py
@@ -4,22 +4,17 @@
 Poisson problem.
 
 """
-from parmepy.operator.continuous import Operator
-try:
-    from parmepy.f2py import fftw2py
-except ImportError:
-    from parmepy.fakef2py import fftw2py
-
+from parmepy.operator.computational import Computational
 from parmepy.operator.discrete.poisson_fft import PoissonFFT
 from parmepy.constants import debug
-import numpy as np
-from parmepy.mpi.main_var import main_size
 from parmepy.operator.velocity_correction import VelocityCorrection
+from parmepy.operator.reprojection import Reprojection
 from parmepy.methods_keys import SpaceDiscretisation
+from parmepy.operator.continuous import opsetup
 import parmepy.default_methods as default
 
 
-class Poisson(Operator):
+class Poisson(Computational):
     """
     Poisson problem,
     \f{eqnarray*}
@@ -32,36 +27,35 @@ class Poisson(Operator):
     """
 
     @debug
-    def __init__(self, velocity, vorticity, flowrate=None, **kwds):
+    def __init__(self, velocity, vorticity, flowrate=None,
+                 projection=None, **kwds):
         """
         Constructor for the Poisson problem.
 
         @param[out] velocity : solution field
         @param[in] vorticity : rhs field
-        @param[in] resolutions :  list of resolutions (one per variable)
-        @param[in] ghosts : ghost layer (optional), default=[0, 0, ...]
-        @param[in] method : numerical method (default = fft)
         @param[in] flowrate : a flow rate value (through input surf,
         normal to xdir) used to compute a correction of the solution field.
         Default = 0 (no correction). See parmepy.operator.velocity_correction.
+        @param projection : if None, no projection. Else:
+        - either the value of the reprojection frequency, never updated,
+        - or a tuple (frequency, threshold).
+        In that case, a criterion
+        depending on the vorticity is computed at each time step; if
+        criterion > threshold, projection at the given frequency is active.
         """
+        # Warning : for fftw all variables must have
+        # the same resolution.
+        assert 'variables' not in kwds, 'variables parameter is useless.'
+        super(Poisson, self).__init__(variables=[velocity, vorticity], **kwds)
         ## solution of the problem
         self.velocity = velocity
         ## -(right-hand side)
         self.vorticity = vorticity
-        # Warning : for fftw all variables must have
-        # the same resolution.
-        super(Poisson, self).__init__(variables=[velocity, vorticity], **kwds)
-
         if self.method is None:
             self.method = default.POISSON
 
-        if self.method[SpaceDiscretisation] is 'fftw':
-            self.resolution = self.resolutions[self.velocity]
-            assert self.resolution == self.resolutions[self.vorticity],\
-                'Poisson error : for fftw, all variables must have\
-                the same global resolution.'
-        else:
+        if self.method[SpaceDiscretisation] != 'fftw':
             raise AttributeError("Method not yet implemented.")
 
         self.input = [self.vorticity]
@@ -72,76 +66,58 @@ class Poisson(Operator):
         else:
             self.withCorrection = False
         self.correction = None
-        self.projection = None
+        self.projection = projection
+        self._config = kwds
+
+        if projection is not None:
+            self.output.append(self.vorticity)
 
     def discretize(self):
         # Poisson solver based on fftw
-        if (self.ghosts is not None) and (np.sum(self.ghosts)!=0):
-            raise AttributeError("Ghosts points not yet\
-            implemented for poisson operator.")
-
-        # Compute local resolution/distribution of data
-        # according to fftw requirements.
-        if self.topology is not None:
-            comm = self.topology.comm
-        elif self._comm is not None:
-            comm = self._comm
-        else:
-            from parmepy.mpi.main_var import main_comm as comm
-        localres, localoffset = fftw2py.init_fftw_solver(
-            self.resolution, self.domain.length, comm=comm.py2f())
-
-        if self.topology is not None:
-            commsize = self.topology.size
-        elif self._comm is not None:
-            commsize = self._comm.Get_size()
-        else:
-            commsize = main_size
-        topodims = np.ones((self.domain.dimension))
-        topodims[-1] = commsize
-        #variables discretization
-        if self.topology is not None:
-            assert (self.topology.shape == topodims).all(), 'input topology is\
-                    not compliant with fftw.'
-            self._discretize_single_topo()
+        if self.method[SpaceDiscretisation] == 'fftw':
+            super(Poisson, self)._fftw_discretize()
+            if self.withCorrection:
+                toporef = self.discreteFields[self.velocity].topology
+                if 'discretization' in self._config:
+                    self._config['discretization'] = toporef
+                self.correction = VelocityCorrection(
+                    self.velocity, self.vorticity,
+                    req_flowrate=self._flowrate, **self._config)
+                self.correction.discretize()
 
+                if isinstance(self.projection, tuple):
+                    freq = self.projection[0]
+                    threshold = self.projection[1]
+                    self.projection = Reprojection(self.vorticity,
+                                                   threshold, freq,
+                                                   **self._config)
+                    self.projection.discretize()
         else:
-            for v in self.variables:
-                topo = self.domain.getOrCreateTopology(
-                    self.domain.dimension,
-                    self.resolution, topodims,
-                    precomputed=True,
-                    offset=localoffset,
-                    localres=localres,
-                    ghosts=self.ghosts,
-                    comm=self._comm)
-                self.discreteFields[v] = v.discretize(topo)
-        # Build and discretize correction operator, if necessary
-        if self.withCorrection:
-            self.correction = VelocityCorrection(self.velocity, self.vorticity,
-                                                 resolutions=self.resolutions,
-                                                 flowrate=self._flowrate,
-                                                 topo=self.velocity.topology)
-            self.correction.discretize()
+            raise AttributeError("Method not yet implemented.")
 
     @debug
-    def setUp(self):
-        """
-        """
-        # Build and setup for the discrete operator
+    @opsetup
+    def setup(self, rwork=None, iwork=None):
+        # Activate correction, if required
         if self.withCorrection:
-            self.correction.setUp()
-            cd = self.correction.discreteOperator
+            self.correction.setup()
+            cd = self.correction.discrete_op
         else:
             cd = None
-        self.discreteOperator = PoissonFFT(self.discreteFields[self.velocity],
+
+        # Activate projection, if required
+        if isinstance(self.projection, Reprojection):
+            # Projection frequency is updated at each
+            # time step, and depends on the vorticity
+            self.projection.setup(rwork=rwork)
+            projection_discr = self.projection.discrete_op
+        else:
+            projection_discr = self.projection
+
+        self.discrete_op = PoissonFFT(self.discreteFields[self.velocity],
                                            self.discreteFields[self.vorticity],
                                            correction=cd,
-                                           projection=self.projection)
-        self.discreteOperator.setUp()
-        self._isUpToDate = True
+                                           rwork=rwork, iwork=iwork,
+                                           projection=projection_discr)
 
-    def activateProjection(self, projection):
-        # If there is a projection, vorticity is also an output
-        self.output.append(self.vorticity)
-        self.projection = projection
+        self._is_uptodate = True
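+
+# The projection argument thus selects one of three regimes (a sketch ;
+# 'velo', 'vorti' and 'd3d' are hypothetical objects) :
+#
+#     Poisson(velo, vorti, discretization=d3d)  # no reprojection
+#     Poisson(velo, vorti, discretization=d3d, projection=10)
+#     # --> reproject every 10 iterations, unconditionally
+#     Poisson(velo, vorti, discretization=d3d, projection=(10, 1e-4))
+#     # --> reproject at frequency 10 only while the vorticity criterion
+#     #     exceeds the threshold 1e-4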
diff --git a/HySoP/hysop/operator/redistribute.py b/HySoP/hysop/operator/redistribute.py
index 5ca4da11473e87cf02f8f25b2963069c80e5d00b..befce9d4d0b8e2951482347d51c7fa0d6eb1b37e 100644
--- a/HySoP/hysop/operator/redistribute.py
+++ b/HySoP/hysop/operator/redistribute.py
@@ -1,375 +1,184 @@
 """
 @file redistribute.py
-Setup for data transfer/redistribution between two parmes topologies.
-
-This operator is an inter operator which is supposed to define the process to
-transfer variables and data from one operator to another.
-This mainly concerns data redistribution if the two operators work on
-different mpi topologies.
-
-When is it required to define an operator between op1 and op2?
-If:
-- the intersection between op1.output-variables and op2.input-variables
-  is not empty
-- AND if the topology on which the variables of op1 are defined is different
-from the one of op2 variables.
-
-Note Franck: this kind of operator may also be useful
-to define the interpolation/filter process for data transfer for
-a variable defined on several meshes.
-
+Abstract interface for data redistribution.
 """
-from parmepy import __VERBOSE__
-from parmepy.constants import debug, PARMES_MPI_REAL, ORDERMPI, np, S_DIR
+
 from parmepy.operator.continuous import Operator
-from parmepy.mpi.bridge import Bridge
-from parmepy.methods_keys import Support
+from abc import ABCMeta, abstractmethod
+from parmepy.mpi.topology import Cartesian
+from parmepy.operator.computational import Computational
 
 
 class Redistribute(Operator):
     """
-    Interconnection between two operators.
-    SetUp will compute (or get if it already exists) a Bridge between two
-    topologies.
-    Apply redistributes data from opFrom topology to opTo topology.
-
+    Bare interface to redistribute operators
     """
-    @debug
-    def __init__(self, opFrom, opTo, name_suffix=None, component=None, **kwds):
 
-        """
-        Create an operator to distribute data between two mpi topologies for a
-        list of variables belonging to two operators.
+    __metaclass__ = ABCMeta
 
-        @param variables : the set of variables to be redistributed
-        @param opFrom : source operator
-        @param opTo : target (i.e.) the operator that handles the topology on
-        which data must be redistributed.
-        @param component: components of vector fields to consider (default:
-        None, all components are taken).
+    def __init__(self, source, target, component=None,
+                 run_till=None, **kwds):
+        """
+        @param source : topology or computational operator
+        @param target : topology or computational operator
+        @param component : which component must be distributed (default = all)
+        @param run_till : a list of operators that must wait for the completion
+        of this redistribute before any apply.
         """
+        # Base class initialisation
         super(Redistribute, self).__init__(**kwds)
-        vars_str = "_("
-        for vv in self.variables:
-            vars_str += vv.name + ","
-        vars_str = vars_str[:-1] + ')'
-        if component is not None:
-            vars_str += S_DIR[component]
-        if name_suffix is None:
-            name_suffix = ''
-        self.name += vars_str + name_suffix
-        ## Source Operator
-        self.opFrom = opFrom
-        ## Targeted operator
-        self.opTo = opTo
 
-        self.input = self.output = self.variables
-        self.evts = []
-        self._toHost_fields = []
-        self._toDevice_fields = []
-        self._hasRequests = False
+        # Object (may be an operator or a topology) which handles the
+        # fields to be transferred
+        self._source = source
+        # Object (may be an operator or a topology) which handles the fields
+        # to be filled in from source.
+        self._target = target
+
         self.component = component
         if component is None:
             # All components are considered
-            self._range_components = lambda v: range(v.nbComponents)
+            self._range_components = lambda v: xrange(v.nbComponents)
         else:
             # Only the given component is considered
-            self._range_components = lambda v: [component]
-        self.r_request = {}
-        self.s_request = {}
-        self._r_types = {}
-        self._s_types = {}
-        for v in self.variables:
-            self._r_types[v] = {}
-            self._s_types[v] = {}
-
-        # Enable desynchronization: the opTo operator must call the wait
-        # function of this redistribute. This operator has to know self.
-        self.opTo.addRedistributeRequirement(self)
-
-    def discretize(self):
-        pass
-
-    @debug
-    def setUp(self):
+            assert self.component >= 0, 'component value must be positive.'
+            self._range_components = lambda v: (self.component)
+
+        ## Bridge between topology of source and topology of target
+        self.bridge = None
+        # True if some MPI operations are running for the current operator.
+        self._has_requests = False
+        # Operators that must wait for this one before
+        # any computation.
+        # E.g. run_till = [op1] means that op1 will
+        # wait for the end of this operator before
+        # it starts its apply.
+        if run_till is None:
+            run_till = []
+
+        assert isinstance(run_till, list)
+        self._run_till = run_till
+
+    @abstractmethod
+    def setup(self, rwork=None, iwork=None):
         """
-        Computes intersection of two topologies.
-
+        Check/set the list of variables to be distributed
+
+        What must be set at setup?
+        ---> the list of continuous variables to be distributed
+        ---> the bridge (one for all variables, which means
+        that all vars must have the same topology in source
+        and the same topology in target)
+        ---> the list of discrete variables for source and
+        for target.
         """
-        # Then check if variables belong to both operators
-        # And check if variables have enought components.
-        for v in self.variables:
-            assert v in self.opFrom.variables and v in self.opTo.variables, \
-                'Redistribute error : one of the variable is not present\
-                in both source and target operator.'
-            if self.component is not None:
-                assert self.component >= 0, 'component needs to be positive'
-            assert v.nbComponents > self.component, \
-                'Redistribute error : variable ' + str(v.name) + ' do not \
-                have enough components (' + str(self.component) + ')'
-        assert self.opFrom.isUp() and self.opTo.isUp(), \
-            """You should setup both opFrom and opTo operators
-            before any attempt to setup a redistribute operator."""
-
-        # Look for an operator operating on device.
-        try:
-            opFrom_is_device = \
-                self.opFrom.method[Support].find('gpu') >= 0
-        except KeyError:  # op.method is a dict not containing Support in keys
-            opFrom_is_device = False
-        except IndexError:  # op.method is a string
-            opFrom_is_device = False
-        except TypeError:  # op.method is None
-            opFrom_is_device = False
-        try:
-            opTo_is_device = \
-                self.opTo.method[Support].find('gpu') >= 0
-        except KeyError:  # op.method is a dict not containing Support in keys
-            opTo_is_device = False
-        except IndexError:  # op.method is a sting
-            opTo_is_device = False
-        except TypeError:  # op.method is None
-            opTo_is_device = False
-
-        if not opFrom_is_device and not opTo_is_device:
-            # case: opFrom(host) --host--> opTo(host)
-            self.apply = self._host
-            self.wait = self._wait_host
-        else:
-            # Have on device operators
-            self.wait = self._wait_all
-            if opFrom_is_device and not opTo_is_device:
-                # case: opFrom(GPU) --toHost--host--> opTo(host)
-                self.apply = self._apply_toHost_host
-            elif not opFrom_is_device and opTo_is_device:
-                # case: opFrom(host) --host--toDevice--> opTo(GPU)
-                self.apply = self._apply_host_toDevice
-            else:
-                # case: opFrom(GPU) --toHost--host--toDevice--> opTo(host)
-                # Transfers are removed if variables are batched
-                if np.any([self.opFrom.discreteFields[v].isBatch
-                           for v in self.variables] +
-                          [self.opTo.discreteFields[v].isBatch
-                           for v in self.variables]):
-                    self.apply = self._host
-                else:
-                    self.apply = self._apply_toHost_host_toDevice
-
-        # Build bridges and toTransfer lists
-        self.bridges = {}
-        backup = None
-        lastvar = None
-        # Create bridges between topologies, for each variable.
-        for v in self.variables:
-            # Bridges creation
-            topofrom = self.opFrom.discreteFields[v].topology
-            topoto = self.opTo.discreteFields[v].topology
-            if backup is not None:
-                # Check if a similar bridge has not already been created.
-                if [topofrom, topoto] == backup:
-                    self.bridges[v] = self.bridges[lastvar]
-            else:
-                self.bridges[v] = Bridge(topofrom, topoto)
-            backup = [topofrom, topoto]
-            lastvar = v
-            # toTransfer list completion
-            if opFrom_is_device:
-                self._toHost_fields.append(self.opFrom.discreteFields[v])
-            if opTo_is_device:
-                self._toDevice_fields.append(self.opTo.discreteFields[v])
-
-        self._main_comm = self.opFrom.discreteFields[v].topology.parent()
-        self._main_rank = self._main_comm.Get_rank()
-
-        # Flag telling if there will be some mpi data transfers.
-        self._useless_transfer = {}
+        assert self.domain is not None
         for v in self.variables:
-            self._useless_transfer[v] = \
-                (opFrom_is_device and opTo_is_device) and \
-                len(self.bridges[v].recvFrom.keys()) == 0 and \
-                len(self.bridges[v].sendTo.keys()) == 0
-
-        # Build MPI subarrays
-        dim = self.domain.dimension
-        for v in self.variables:
-            br = self.bridges[v]
-            vToShape = self.opTo.discreteFields[v].data[0].shape
-            vFromShape = self.opFrom.discreteFields[v].data[0].shape
-            for rk in br.recvFrom.keys():
-                subvshape = tuple([br.recvFrom[rk][i].stop -
-                                   br.recvFrom[rk][i].start
-                                   for i in range(dim)])
-                substart = tuple([br.recvFrom[rk][i].start
-                                  for i in range(dim)])
-                self._r_types[v][rk] = \
-                    PARMES_MPI_REAL.Create_subarray(vToShape,
-                                                    subvshape,
-                                                    substart,
-                                                    order=ORDERMPI)
-                self._r_types[v][rk].Commit()
-            for rk in br.sendTo.keys():
-                subvshape = tuple([br.sendTo[rk][i].stop -
-                                   br.sendTo[rk][i].start
-                                   for i in range(dim)])
-                substart = tuple([br.sendTo[rk][i].start
-                                  for i in range(dim)])
-                self._s_types[v][rk] = \
-                    PARMES_MPI_REAL.Create_subarray(vFromShape,
-                                                    subvshape,
-                                                    substart,
-                                                    order=ORDERMPI)
-                self._s_types[v][rk].Commit()
-
-        self._isUpToDate = True
+            assert v.domain is self.domain
+        super(Redistribute, self).setup(rwork, iwork)
 
-    def _apply_toHost_host_toDevice(self, simulation=None):
-        if __VERBOSE__:
-            print ("{0} APPLY toHOST+HOST+toDEVICE".format(self._main_rank))
-        self._toHost()
-        self._wait_device()
-        self._host()
-        self._wait_host()
-        self._toDevice()
-
-    def _apply_toHost_host(self, simulation=None):
-        if __VERBOSE__:
-            print ("{0} APPLY toHOST+HOST".format(self._main_rank))
-        self._toHost()
-        self._wait_device()
-        self._host()
-
-    def _apply_host_toDevice(self, simulation=None):
-        if __VERBOSE__:
-            print ("{0} APPLY HOST+toDEVICE".format(self._main_rank))
-        self._host()
-        self._wait_host()
-        self._toDevice()
-
-    def _toHost(self):
+    def _check_operator(self, op):
         """
-        Proceed with data transfer of variables from device to host
+        @param op : a computational operator
+        - check if op is really a computational operator
+        - discretize op
+        - check if all required variables (if any) belong to op
         """
-        for v in self.variables:
-            dv = self.opFrom.discreteFields[v]
-            if dv in self._toHost_fields:
-                if not self._useless_transfer[v]:
-                    dv.toHost(self.component)
-
-    def _toDevice(self):
+        assert isinstance(op, Computational)
+        op.discretize()
+        msg = 'The variables to be distributed '
+        msg += 'do not belong to the input operator.'
+        if len(self.variables) > 0:
+            assert all(v in op.variables for v in self.variables), msg
+
+    def _set_variables(self):
         """
-        Proceed with data transfer of variables from device to host
+        Check/set the list of variables processed by the current operator.
         """
-        for v in self.variables:
-            dv = self.opTo.discreteFields[v]
-            if dv in self._toDevice_fields:
-                if not self._useless_transfer[v]:
-                    dv.toDevice(self.component)
+        # Set list of variables.
+        # It depends on :
+        # - the type of source/target : Cartesian, Computational or None
+        # - the args variables : a list of variables or None
+        # Possible cases:
+        # - if source or target is None --> variables is required
+        # - if source and target are Cartesian --> variables is required
+        # - in all other cases, variables is optional.
+        # If variables are not set at init,
+        # they must be inferred from source/target operators.
+        has_var = len(self.variables) > 0
+        vlist = (v for v in self.variables)
+
+        if self._source is None or self._target is None:
+            assert len(self.variables) > 0
+            self.variables = [v for v in vlist]
+        else:
+            source_is_topo = isinstance(self._source, Cartesian)
+            target_is_topo = isinstance(self._target, Cartesian)
+
+            # both source and target are topologies. Variables required.
+            if source_is_topo and target_is_topo:
+                msg = 'Redistribute, a list of variables is required at init.'
+                assert has_var, msg
+                self.variables = [v for v in vlist]
+
+            elif not source_is_topo and not target_is_topo:
+                # both source and target are operators
+                # --> intersection of their variables
+                vsource = self._source.variables
+                vtarget = self._target.variables
+                if not has_var:
+                    vlist = (v for v in vsource if v in vtarget)
+                self.variables = [v for v in vlist]
+
+            elif source_is_topo:
+                # source = topo, target = operator
+                vtarget = self._target.variables
+                if not has_var:
+                    vlist = (v for v in vtarget)
+                self.variables = [v for v in vlist]
 
-    def _host(self, simulation=None):
-        """
-        Proceed with data redistribution from opFrom to opTo
-        """
-        # TODO :
-        # - save a set of bridges in the domain and access them from operator
-        # - process all variables in one shot if they have the same topo
-        # (use buffers for mpi send/recv? )
-        # - move MPI datatypes into the bridge? --> and free MPI type properly
-        if __VERBOSE__:
-            print ("{0} APPLY HOST".format(self._main_rank))
-        self.r_request = {}
-        self.s_request = {}
-        for v in self.variables:
-            br = self.bridges[v]
-            # Apply for each component considered
-            for d in self._range_components(v):
-                if __VERBOSE__:
-                    print ("{0} APPLY HOST".format(self._main_rank),
-                           self.opFrom.discreteFields[v].name, '->',
-                           self.opTo.discreteFields[v].name, S_DIR[d])
-                vTo = self.opTo.discreteFields[v].data[d]
-                vFrom = self.opFrom.discreteFields[v].data[d]
-                v_name = self.opFrom.discreteFields[v].name + S_DIR[d]
-                if br.hasLocalInter:
-                    vTo[br.ito] = vFrom[br.ifrom]
-                cRk = self._main_comm.Get_rank()
-                for rk in br.recvFrom.keys():
-                    recvtag = (cRk + 1) * 989 + (rk + 1) * 99 + (d + 1) * 88
-                    self.r_request[v_name + str(rk)] = \
-                        self._main_comm.Irecv([vTo, 1, self._r_types[v][rk]],
-                                              source=rk, tag=recvtag)
-                    self._hasRequests = True
-                for rk in br.sendTo.keys():
-                    sendtag = (rk + 1) * 989 + (cRk + 1) * 99 + (d + 1) * 88
-                    self.s_request[v_name + str(rk)] = \
-                        self._main_comm.Issend([vFrom, 1,
-                                               self._s_types[v][rk]],
-                                               dest=rk, tag=sendtag)
-                    self._hasRequests = True
+            else:
+                # source = operator, target = topo
+                vsource = self._source.variables
+                if not has_var:
+                    vlist = (v for v in vsource)
+                self.variables = [v for v in vlist]
 
-    def _wait_host(self):
-        """
-        MPI Barrier to wait for the end
-        of all communication requests.
-        """
-        if __VERBOSE__:
-            print ("{0}", "WAIT MPI".format(self._main_rank),
-                   self._hasRequests)
-        if self._hasRequests:
-            for rk in self.r_request.keys():
-                self.r_request[rk].Wait()
-            for rk in self.s_request.keys():
-                self.s_request[rk].Wait()
-        self._hasRequests = False
+        assert len(self.variables) > 0
 
-    def _wait_device(self):
-        if __VERBOSE__:
-            print ("{0}".format(self._main_rank), "WAITING OPENCL")
-        for dv in self._toDevice_fields + self._toHost_fields:
-            dv.wait()
+        # Variables is converted to a dict to be coherent with
+        # computational operators ...
+        self.variables = {key: None for key in self.variables}
 
-    def _wait_all(self):
-        self._wait_host()
-        self._wait_device()
+        # All variables must have the same domain
+        self.domain = self.variables.keys()[0].domain
+        for v in self.variables:
+            assert v.domain is self.domain
 
-    def test(self, rsend=None, rrecv=None):
+    def _set_topology(self, current):
         """
-        if neither rsend or rrecv is given return
-        True if all communication request are complete
-        else check for sending to rsend or
-        receiving from rrecv. Process ranks
-        should be given in main_comm.
-        @param rsend : discrete variable name + S_DIR + rank of the process
-        to which a message has been sent
-        and for which we want to test
-        message completion.
-        @param  rrecv : discrete variable name + S_DIR + rank of the process
-        from which a message has been receive
-        and for which we want to test
-        message completion.
+        @param current: a topology or a computational operator
+        This function checks that current is valid and fits with
+        self.variables, and gets its topology to set self._topology.
         """
-        if(rsend is not None or rrecv is not None):
-            send_res = True
-            recv_res = True
-            if rsend is not None:
-                send_res = self.s_request[rsend].Test()
-            if rrecv is not None:
-                recv_res = self.r_request[rrecv].Test()
-            res = send_res and recv_res
+        if isinstance(current, Cartesian):
+            result = current
+            for v in self.variables:
+                v.discretize(result)
+        elif isinstance(current, Computational):
+            self._check_operator(current)
+            vref = self.variables.keys()[0]
+            vcurrent = current.variables
+            result = vcurrent[vref]
+            # We ensure that all vars have
+            # the same topo in source/target.
+            for v in (v for v in self.variables if v is not vref):
+                assert vcurrent[v] is result
         else:
-            res = True
-            for rk in self.r_request.keys():
-                res = self.r_request[rk].Test()
-                if not res:
-                    return res
-            for rk in self.s_request.keys():
-                res = self.s_request[rk].Test()
-                if not res:
-                    return res
-        return res
+            msg = "the source/target is neither an operator or a topology."
+            raise AttributeError(msg)
+        assert result.task_id() == self.domain.currentTask()
+        return result
 
-    def addRedistributeRequirement(self, red):
-        raise ValueError(
-            "Cannot add a requirement to a Redistribute operator.")
-
-    def getRedistributeRequirement(self):
-        return []
+    def printComputeTime(self):
+        pass
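+
+# The inference rule of _set_variables reduces to a small pure function ;
+# a standalone sketch (illustrative names) :
+#
+#     def infer_variables(given, vsource, vtarget):
+#         if given:               # an explicit list always wins
+#             return list(given)
+#         # otherwise both ends must provide variables : take the
+#         # intersection of source and target variables
+#         return [v for v in vsource if v in vtarget]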
diff --git a/HySoP/hysop/operator/redistribute_inter.py b/HySoP/hysop/operator/redistribute_inter.py
new file mode 100644
index 0000000000000000000000000000000000000000..58fc60a5a464358a3caec1bb244be463286de6cf
--- /dev/null
+++ b/HySoP/hysop/operator/redistribute_inter.py
@@ -0,0 +1,268 @@
+"""
+@file redistribute_inter.py
+Setup for data transfer/redistribution between two parmes topologies defined
+on MPI communicators with empty intersection (for example obtained
+with Comm_Split). One topology is labeled as the source and the other as
+the destination.
+
+It relies on an inter-communicator bridge (InterBridge).
+"""
+from parmepy.constants import debug, S_DIR
+from parmepy import __VERBOSE__
+from parmepy.mpi.newBridge import InterBridge
+from parmepy.operator.redistribute import Redistribute
+from parmepy.operator.continuous import Operator
+
+
+class RedistributeInter(Redistribute):
+    """
+    Interconnection between two topologies defined on disjoint subsets of
+    MPI processes.
+    Setup computes an InterBridge between the topologies; apply transfers
+    data from the task of id sourceId to the task of id targetId.
+    """
+    @debug
+    def __init__(self, proc_tasks, sourceId=None, targetId=None, **kwds):
+        """
+        Create an operator to distribute data between two mpi topologies for a
+        list of variables.
+
+        @param proc_tasks: python array specifying the task id of each
+        process of the parent_comm MPI intracommunicator.
+        @remark : proc_tasks size and the number of processes in parent_comm
+        must be equal.
+        """
+        super(RedistributeInter, self).__init__(**kwds)
+        # Change task_id of the current operator: -1 means
+        # it belongs to several tasks.
+        self.task_id = -1
+        assert 'parent_comm' in kwds, 'A parent communicator must be set.'
+        assert self._parent_comm.Get_size() == len(proc_tasks), \
+            "Parent communicator ({0})".format(self._parent_comm.Get_size()) +\
+            " and size of the task id array " + \
+            "({0}) are not equal".format(len(proc_tasks))
+        ## connectivity between tasks and procs
+        self.proc_tasks = proc_tasks
+        self._my_rank = None
+        self._sourceId = sourceId
+        self._targetId = targetId
+        msg = 'You must provide both source and target arguments.'
+        if 'source' in kwds:
+            assert 'target' in kwds, msg
+        if 'target' in kwds:
+            assert 'source' in kwds, msg
+
+        # If ids can not be deduced from source/target :
+        if not isinstance(self._source, Operator):
+            msg = 'You must set sourceId and targetId in arguments.'
+            assert sourceId is not None, msg
+            assert targetId is not None, msg
+
+        else:
+            msg = 'sourceId/targetId arguments are useless when '
+            msg += 'source and target operators are given.'
+            assert 'sourceId' not in kwds, msg
+            assert 'targetId' not in kwds, msg
+            self._sourceId = self._source.task_id
+            self._targetId = self._target.task_id
+
+    def _vars_setup_fromdict(self):
+        """
+        set vSource/vTarget dictionaries and create bridges.
+        self.variables must be a dict of the form:
+        {v: (topoSource, topoTarget)_v,
+         w: (topoSource, topoTarget)_w,
+         ...}
+
+        Warning : this routine cannot be called during init; topologies
+        may not exist yet if the operator does not belong to the current
+        MPI task.
+        """
+        msg = 'Too many arguments in Operator init/setup. '
+        msg += 'You must choose either variables = "list + source + target"'
+        msg += ' or variables = "dictionary".'
+        assert self._source is None and self._target is None, msg
+        for v in self.variables:
+            topoFrom = self.variables[v][0]
+            topoTo = self.variables[v][1]
+            self.bridges[v] = InterBridge(topoFrom, topoTo,
+                                          self._sourceId, self._targetId,
+                                          proc_tasks=self.proc_tasks,
+                                          parent_comm=self._parent_comm)
+
+    def _vars_setup_fromlist(self):
+        """
+        set vSource/vTarget dictionaries, when all source/target variables
+        have the same topology.
+        source/target : a pair of topologies (case 1) or operators (case 2)
+        case 1 :
+        Redistribute(variables=[v, w, ...], source=topo1, target=topo2, ...)
+        case 2 :
+        Redistribute([v, w, ...], source=op1, target=op2, ...)
+        """
+        from parmepy.operator.continuous import Operator
+        if not isinstance(self._source, Operator):
+            for v in self.variables:
+                self.variables[v] = (self._source, self._target)
+                self.bridges[v] = InterBridge(self._source, self._target,
+                                              self._sourceId, self._targetId,
+                                              self.proc_tasks,
+                                              self._parent_comm)
+
+        ## elif isinstance(self._source, Operator):
+        ##     for v in self.variables:
+        ##         # check if variables belong to both operators
+        ##         # and check if variables have enough components
+        ##         self._checkOperators(v)
+        ##         # then get topologies and build the bridges
+        ##         self.vSource[v] = self._source.discreteFields[v]
+        ##         self.vTarget[v] = self._target.discreteFields[v]
+        ##         topoSource = self.vSource[v].topology
+        ##         topoTarget = self.vTarget[v].topology
+        ##         self.variables[v] = (topoSource, topoTarget)
+        ##         self.bridges[v] = InterBridge(topoSource, topoTarget)
+        else:
+            raise ValueError("Source/target type must be\
+                              either Cartesian or Operator.")
+
+    @debug
+    def setup(self):
+        """
+        Computes the intersection of topologies and sets up
+        the MPI intercommunicator.
+        """
+        super(RedistributeInter, self).setup()
+        self._parent_rank = self._parent_comm.Get_rank()
+
+        self._isSource = self.proc_tasks[self._parent_rank] == self._sourceId
+        self._isTarget = self.proc_tasks[self._parent_rank] == self._targetId
+
+        self._the_apply = self._apply_host
+
+        if self._isSource:
+            self._currentIndex = 0
+        elif self._isTarget:
+            self._currentIndex = 1
+
+        for v in self.variables:
+            topology = self.variables[v][self._currentIndex]
+            dv = v.discreteFields[topology]
+            transfers = self.bridges[v].transfers
+            vShape = dv.data[0].shape
+            # Set derived types
+            self.bridges[v]._createSubArray(transfers, self._r_types[v],
+                                            vShape)
+        self._is_uptodate = True
+
+    @debug
+    def apply(self, simulation=None):
+        """
+        Apply this operator to its variables.
+        @param simulation : object that describes the simulation
+        parameters (time, time step, iteration number ...), see
+        parmepy.problem.simulation.Simulation for details.
+        """
+        for req in self.requirements:
+            req.wait()
+        self._the_apply(simulation)
+
+    def _apply_toHost_host_toDevice(self, simulation=None):
+        if __VERBOSE__:
+            print ("{0} APPLY toHOST+HOST+toDEVICE".format(self._parent_rank))
+        if self._isSource:
+            self._toHost()
+            self._wait_device()
+        self._host()
+        self._wait_host()
+        if self._isTarget:
+            self._toDevice()
+            self._wait_device()
+
+    def _apply_toHost_host(self, simulation=None):
+
+        if __VERBOSE__:
+            print ("{0} APPLY toHOST+HOST".format(self._parent_rank))
+        if self._isSource:
+            self._toHost()
+            self._wait_device()
+        self._host()
+        self._wait_host()
+
+    def _apply_host_toDevice(self, simulation=None):
+        if __VERBOSE__:
+            print ("{0} APPLY HOST+toDEVICE".format(self._parent_rank))
+        self._host()
+        self._wait_host()
+        self._parent_comm.Barrier()
+        if self._isTarget:
+            self._toDevice()
+            self._wait_device()
+        self._parent_comm.Barrier()
+
+    def _apply_host(self, simulation=None):
+        if __VERBOSE__:
+            print ("{0} APPLY HOST".format(self._parent_rank))
+        self._host()
+        self._wait_host()
+
+    def _host(self, simulation=None):
+        """
+        Proceed with data redistribution from the source to the target.
+        """
+        self._parent_comm.Barrier()
+        self.r_request = {}
+        self.s_request = {}
+
+        for v in self.variables:
+            topology = self.variables[v][self._currentIndex]
+            rank = topology.comm.Get_rank()
+            dv = v.discreteFields[topology]
+            transfers = self.bridges[v].transfers
+            for d in self._range_components(v):
+                v_name = dv.name + S_DIR[d]
+                # Set reception on the target side
+                if self._isTarget:
+                    for from_rk in transfers.keys():
+                        self.r_request[v_name + str(from_rk)] = \
+                            self.bridges[v].inter_comm.Irecv(
+                                [dv.data[d], 1, self._r_types[v][from_rk]],
+                                source=from_rk, tag=from_rk)
+                # Set sending on the source side
+                if self._isSource:
+                    for to_rk in transfers.keys():
+                        self.s_request[v_name + str(to_rk)] = \
+                            self.bridges[v].inter_comm.Issend(
+                                [dv.data[d], 1, self._r_types[v][to_rk]],
+                                dest=to_rk, tag=rank)
+
+    def _toHost(self):
+        """
+        Proceed with data transfer of variables from device to host
+        """
+        if __VERBOSE__:
+            print ("{0} APPLY toHOST".format(self._parent_rank))
+        for v in self.variables:
+            dv = self._source.discreteFields[v]
+            if dv in self._toHost_fields:
+                dv.toHost(self.component)
+
+    def _toDevice(self):
+        """
+        Proceed with data transfer of variables from host to device
+        """
+        if __VERBOSE__:
+            print ("{0} APPLY toDEVICE".format(self._parent_rank))
+        for v in self.variables:
+            dv = self._target.discreteFields[v]
+            if dv in self._toDevice_fields:
+                dv.toDevice(self.component)
+
+    def _wait_host(self, simulation=None):
+        """Wait for requests completion."""
+        if __VERBOSE__:
+            print ("{0} WAIT MPI".format(self._parent_rank))
+        for rk in self.r_request:
+            self.r_request[rk].Wait()
+        for rk in self.s_request:
+            self.s_request[rk].Wait()
+        self._parent_comm.Barrier()
+        self.r_request = {}
+        self.s_request = {}
+
diff --git a/HySoP/hysop/operator/redistribute_intra.py b/HySoP/hysop/operator/redistribute_intra.py
new file mode 100644
index 0000000000000000000000000000000000000000..a807c15b4bda07c4c5ade56e3d339d649fea19aa
--- /dev/null
+++ b/HySoP/hysop/operator/redistribute_intra.py
@@ -0,0 +1,238 @@
+"""
+@file redistribute_intra.py
+Setup for data transfer/redistribution between two parmes topologies.
+
+This operator connects two operators: it defines the process used to
+transfer variables and data from one operator to another.
+This mainly concerns data redistribution when the two operators work on
+different mpi topologies.
+
+When is it required to define an operator between op1 and op2?
+If:
+- the intersection between op1.output-variables and op2.input-variables
+  is not empty
+- AND if the topology on which the variables of op1 are defined is different
+from the one of op2 variables.
+
+Note Franck: this kind of operator may also be useful
+to define the interpolation/filter process used to transfer data for
+a variable defined on several meshes.
+
+"""
+from parmepy import __VERBOSE__
+from parmepy.constants import debug, PARMES_MPI_REAL, ORDERMPI, np, S_DIR
+from parmepy.mpi.bridge import Bridge
+from parmepy.methods_keys import Support
+from parmepy.operator.redistribute import Redistribute
+from parmepy.operator.continuous import Tools
+from parmepy.operator.continuous import Operator
+
+
+class RedistributeIntra(Redistribute):
+    """
+    Interconnection between two operators.
+    Setup will compute (or reuse, if it already exists) a Bridge between
+    the two topologies.
+    Apply redistributes data from the source topology to the target one.
+
+    """
+    @debug
+    def __init__(self, **kwds):
+
+        """
+        Create an operator to distribute data between two mpi topologies for a
+        list of variables belonging to two operators.
+
+        @param variables : the set of variables to be redistributed
+        @param source : source operator
+        @param target : target operator, i.e. the operator that handles the
+        topology onto which data must be redistributed.
+        @param component: components of vector fields to consider (default:
+        None, i.e. all components).
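+
+        A minimal usage sketch (op1, op2 are hypothetical operators already
+        discretized on two different topologies):
+        @code
+        red = RedistributeIntra(variables=[w], source=op1, target=op2)
+        red.setup()
+        red.apply()
+        red.wait()
+        @endcode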
+        """
+        super(RedistributeIntra, self).__init__(**kwds)
+
+        self._hasRequests = False
+
+        # Enable desynchronization: the target operator must call the wait
+        # function of this redistribute operator, which must therefore be
+        # registered as one of its requirements.
+        ##self.opTo.addRedistributeRequirement(self)
+
+    @debug
+    def setup(self):
+        """
+        Computes intersection of two topologies.
+
+        """
+
+        msg = 'Source and target objects must be of the same type, '
+        msg += 'either topology or operator.'
+        assert self._source.__class__ == self._target.__class__, msg
+        super(RedistributeIntra, self).setup()
+
+        for v in self.variables:
+            val = self.variables[v]
+            assert val[0].task_id == val[1].task_id
+        self.task_id = val[0].task_id
+
+        if isinstance(self._source, Operator):
+            source_isGPU = Tools.checkDevice(self._source)
+            target_isGPU = Tools.checkDevice(self._target)
+        else:
+            source_isGPU = False
+            target_isGPU = False
+
+        if not source_isGPU and not target_isGPU:
+            # case: opFrom(host) --host--> opTo(host)
+            self.apply = self._host
+            self.wait = self._wait_host
+        else:
+            # Have on device operators
+            self.wait = self._wait_all
+            if source_isGPU and not target_isGPU:
+                # case: opFrom(GPU) --toHost--host--> opTo(host)
+                self.apply = self._apply_toHost_host
+            elif not source_isGPU and target_isGPU:
+                # case: opFrom(host) --host--toDevice--> opTo(GPU)
+                self.apply = self._apply_host_toDevice
+            else:
+                # case: opFrom(GPU) --toHost--host--toDevice--> opTo(GPU)
+                # Transfers are removed if variables are batched
+                if np.any([self.vSource[v].isBatch for v in self.variables] +
+                          [self.vTarget[v].isBatch for v in self.variables]):
+                    self.apply = self._host
+                else:
+                    self.apply = self._apply_toHost_host_toDevice
+
+        for v in self.variables:
+            # toTransfer list completion
+            if source_isGPU:
+                self._toHost_fields.append(self.opFrom.discreteFields[v])
+            if target_isGPU:
+                self._toDevice_fields.append(self.opTo.discreteFields[v])
+
+        if self._parent_comm is None:
+            self._parent_comm = self.variables.values()[0][0].parent()
+        self._parent_rank = self._parent_comm.Get_rank()
+
+        # Flag telling if there will be some mpi data transfers.
+        self._useless_transfer = {}
+        for v in self.variables:
+            self._useless_transfer[v] = (source_isGPU and target_isGPU) and \
+                self.bridges[v].uselessTransfer()
+
+            br = self.bridges[v]
+            vToShape = self.vTarget[v].data[0].shape
+            vFromShape = self.vSource[v].data[0].shape
+            br.createRecvSubArray(self._r_types[v], vToShape)
+            br.createSendSubArray(self._s_types[v], vFromShape)
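+            # Note: the subarray datatypes built above are expected to
+            # match the local intersections computed by the bridge, so that
+            # the Irecv/Issend calls in _host can operate directly on the
+            # numpy arrays without intermediate copies.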
+
+    def _apply_toHost_host_toDevice(self, simulation=None):
+        if __VERBOSE__:
+            print ("{0} APPLY toHOST+HOST+toDEVICE".format(self._parent_rank))
+        self._toHost()
+        self._wait_device()
+        self._host()
+        self._wait_host()
+        self._toDevice()
+
+    def _apply_toHost_host(self, simulation=None):
+        if __VERBOSE__:
+            print ("{0} APPLY toHOST+HOST".format(self._parent_rank))
+        self._toHost()
+        self._wait_device()
+        self._host()
+
+    def _apply_host_toDevice(self, simulation=None):
+        if __VERBOSE__:
+            print ("{0} APPLY HOST+toDEVICE".format(self._parent_rank))
+        self._host()
+        self._wait_host()
+        self._toDevice()
+
+    def _toHost(self):
+        """
+        Proceed with data transfer of variables from device to host
+        """
+        for v in self.variables:
+            dv = self.vSource[v]
+            if dv in self._toHost_fields:
+                if not self._useless_transfer[v]:
+                    dv.toHost(self.component)
+
+    def _toDevice(self):
+        """
+        Proceed with data transfer of variables from host to device
+        """
+        for v in self.variables:
+            dv = self.vTarget[v]
+            if dv in self._toDevice_fields:
+                if not self._useless_transfer[v]:
+                    dv.toDevice(self.component)
+
+    def _host(self, simulation=None):
+        """
+        Proceed with data redistribution from the source to the target.
+        """
+        # TODO :
+        # - save a set of bridges in the domain and access them from operator
+        # - process all variables in one shot if they have the same topo
+        # (use buffers for mpi send/recv? )
+        # - move MPI datatypes into the bridge? --> and free MPI type properly
+        if __VERBOSE__:
+            print ("{0} APPLY HOST".format(self._parent_rank))
+        self.r_request = {}
+        self.s_request = {}
+        for v in self.variables:
+            br = self.bridges[v]
+            # Apply for each component considered
+            for d in self._range_components(v):
+                if __VERBOSE__:
+                    print ("{0} APPLY HOST".format(self._parent_rank),
+                           self.vSource[v].name, '->',
+                           self.vTarget[v].name, S_DIR[d])
+                vTo = self.vTarget[v].data[d]
+                vFrom = self.vSource[v].data[d]
+                v_name = self.vSource[v].name + S_DIR[d]
+                if br.hasLocalInter:
+                    vTo[br.ito] = vFrom[br.ifrom]
+                cRk = self._parent_comm.Get_rank()
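+                # recvtag is built to mirror the sender's sendtag below:
+                # e.g. for d=0, a message from rank 1 to rank 3 is tagged
+                # (3+1)*989 + (1+1)*99 + (0+1)*88 = 4242 on both sides.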
+                for rk in br.recvFrom:
+                    recvtag = (cRk + 1) * 989 + (rk + 1) * 99 + (d + 1) * 88
+                    self.r_request[v_name + str(rk)] = \
+                        self._parent_comm.Irecv([vTo, 1, self._r_types[v][rk]],
+                                                source=rk, tag=recvtag)
+                    self._hasRequests = True
+                for rk in br.sendTo:
+                    sendtag = (rk + 1) * 989 + (cRk + 1) * 99 + (d + 1) * 88
+                    self.s_request[v_name + str(rk)] = \
+                        self._parent_comm.Issend([vFrom, 1,
+                                                 self._s_types[v][rk]],
+                                                 dest=rk, tag=sendtag)
+                    self._hasRequests = True
+
+    def _wait_host(self):
+        """
+        MPI Barrier to wait for the end
+        of all communication requests.
+        """
+        if __VERBOSE__:
+            print ("{0}", "WAIT MPI".format(self._parent_rank),
+                   self._hasRequests)
+        if self._hasRequests:
+            for rk in self.r_request.keys():
+                self.r_request[rk].Wait()
+            for rk in self.s_request.keys():
+                self.s_request[rk].Wait()
+        self._hasRequests = False
+
+    def _wait_all(self):
+        self._wait_host()
+        self._wait_device()
+
+    def addRedistributeRequirement(self, red):
+        raise ValueError(
+            "Cannot add a requirement to a Redistribute_intra operator.")
+
+    def getRedistributeRequirement(self):
+        return []
diff --git a/HySoP/hysop/operator/reprojection.py b/HySoP/hysop/operator/reprojection.py
index a87445a6071da101bff3da95dc3f2d52c3b52208..bf36bf9ebc61b734425c34ce0c57ec9b14f03819 100644
--- a/HySoP/hysop/operator/reprojection.py
+++ b/HySoP/hysop/operator/reprojection.py
@@ -1,180 +1,53 @@
 # -*- coding: utf-8 -*-
 """
-@file reprojection_criterion.py
+@file operator/reprojection.py
 Compute reprojection criterion and divergence maximum
 """
-import numpy as np
-from parmepy.constants import debug, PARMES_MPI_REAL
-from parmepy.methods_keys import SpaceDiscretisation
-from parmepy.operator.monitors.monitoring import Monitoring
-from parmepy.numerics.finite_differences import FD_C_4
-from parmepy.numerics.differential_operations import GradV
-import parmepy.tools.numpywrappers as npw
-from parmepy.numerics.updateGhosts import UpdateGhosts
-from parmepy.mpi import MPI
-from parmepy.tools.timers import timed_function
+from parmepy.operator.computational import Computational
+from parmepy.operator.discrete.reprojection import Reprojection as RD
+from parmepy.operator.continuous import opsetup
 
 
-class Reprojection_criterion(Monitoring):
+class Reprojection(Computational):
     """
     Computes and prints reprojection criterion.
     See the related PDF called "vorticity_solenoidal_projection.pdf"
     in ParmesDoc for more details.
     """
-    def __init__(self, vorticity, reproj_cst, reprojRate,
-                 checkCriterion=False, **kwds):
+    def __init__(self, vorticity, threshold, frequency, **kwds):
         """
         Constructor.
         @param vorticity field
-        @param method : finite difference scheme
-        @param topo : the topology on which we want to monitor the fields
-        @param prefix : output file name.
-        @param checkCriterion :  set behavior of this operator :
-        if True, compute some criterion and force frequency
-        to 1 if reprojection is needed.
-        else (false) performs reprojection every reprojRate iterations.
-        @param reprojRate : set rate of execution of the reprojection
-        @param io_params : parameters (dict) to set file output.
-        If  None, no output. Set io_params = {} if you want output,
-        with default parameters values. Default file name = 'reproj'
-        See parmepy.tools.io_utils.Writer for details
+        @param threshold : criterion threshold; when the criterion exceeds
+        this value, reprojection is forced at every iteration.
+        @param frequency : default frequency of execution of the reprojection
         """
-        
-        if 'io_params' in kwds:
-            params = kwds['io_params']
-            if not "filename" in params:
-                params["filename"] = "reproj"
-            # Set output buffer shape
-            params["writebuffshape"] = (1, 5)
-
-        super(Reprojection_criterion, self).__init__(variables=[vorticity],
-                                                     **kwds)
-        # \todo : rewrite for multiresolution case.
-        # Note FP : for multiresolution case, it would probably be
-        # better to use two different operators for energy and enstrophy.
-        ## Frequency for reprojection
-        self.reprojRate = reprojRate
-        ## The initial value will be used as default during
-        # simulation
-        self.default_rate = reprojRate
-        ## Set behavior of this operator :
-        ## if True, compute some criterion and force frequency
-        ## to 1 if reprojection is needed.
-        ## else (false) performs reprojection every frequency iterations.
-        self.checkCriterion = checkCriterion
+        assert 'variables' not in kwds, 'variables parameter is useless.'
+        super(Reprojection, self).__init__(variables=[vorticity], **kwds)
         # constant defining the reprojection criterion :
         # if the latter is greater than this constant, then a reprojection
         # is needed
-        self.reproj_cst = reproj_cst
-        self.proj_counter = 0
-        ## local buffer
-        ## diag = [time, projCriterion, d1, d2, proj_counter]
-        ## See apply for details about d1, d2.
-        self.diagnostics = npw.zeros(5)
-        # Connect writer buffer to diagnostics, if required
-        if self._writer is not None:
-            self._writer.buffer = self.diagnostics.reshape(1, 5)
-
-        if self.checkCriterion:
-            ## vorticity field
-            self.vorticity = vorticity
-            if self.method is None:
-                self.method = {SpaceDiscretisation: FD_C_4}
-            ## Numerical methods for space discretization
-            assert SpaceDiscretisation in self.method.keys()
-            self.method = self.method[SpaceDiscretisation]
-
-            self.input = [vorticity]
-        else:
-            self.input = []
-
+        self.threshold = threshold
+        ## Frequency for reprojection
+        self.frequency = frequency
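+        # Example: with frequency=10 and threshold=0.04, the projection is
+        # performed every 10 iterations, and is expected to be forced at
+        # each iteration while the criterion computed by the discrete
+        # operator exceeds the threshold.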
+        ## vorticity field
+        self.vorticity = vorticity
+        self.input = [vorticity]
         self.output = []
 
-    def setUp(self):
-        if not self._isUpToDate and self.checkCriterion:
-            # Get discrete fields for vorticity.
-            # Note FP : two options to get the discrete fields:
-            # - 'field.discretization(topo)' that just try
-            # to get the discrete field and return an error
-            # if it does not exist.
-            # - 'field.discretizet(topo)
-            # try to get the discrete field and create
-            # a new discretization if it does not exist.
-            # Current choice : no creation.
-            self.discreteFields[self.vorticity] =\
-                self.vorticity.discretization(self.topology)
-
-            # prepare ghost points synchro for vorticity
-            self._synchronize = UpdateGhosts(self.topology,
-                                             self.vorticity.nbComponents)
-
-            # grad function
-            self._function = GradV(self.topology, method=self.method)
-
-            self.vort_d = self.discreteFields[self.vorticity]
-
-            memshape = self.vort_d.data[0].shape
-            worklength = self.vort_d.nbComponents ** 2
-            # gradient result array
-            self.grad = [npw.zeros(memshape) for i in xrange(worklength)]
-
-        self._isUpToDate = True
-
-    @debug
-    @timed_function
-    def apply(self, simulation=None):
-        """
-        Computes and prints reprojection criterion and divergence maximum
-        """
-        self.diagnostics[0] = simulation.time
-        ite = simulation.currentIteration
-
-        # Reset reprojection frequency to default
-        self.reprojRate = self.default_rate
-
-        # If required, computation of a criterion
-        # and reset frequency
-        if self.checkCriterion:
-            # Synchronize ghost points of vorticity
-            self._synchronize(self.vort_d.data)
-            # gradU computation
-            self.grad = self._function(self.vort_d.data, self.grad)
-            nbComponents = self.vorticity.nbComponents
-            # maxima of vorticity divergence (abs)
-            self.diagnostics[2] = \
-                np.max(abs(sum([(self.grad[(nbComponents + 1) * i])
-                               for i in xrange(nbComponents)])))
-
-            # maxima of partial derivatives of vorticity
-            for grad_n in self.grad:
-                self.diagnostics[3] = max(self.diagnostics[3],
-                                          np.max(abs(grad_n)))
-
-            # computation of the reprojection criterion and reduction
-            # among all proc.
-            projCriterion = self.diagnostics[2] / self.diagnostics[3]
-            projCriterion = \
-                self.topology.comm.allreduce(projCriterion, PARMES_MPI_REAL,
-                                          op=MPI.MAX)
-            self.diagnostics[1] = projCriterion
-
-            # is reprojection of vorticity needed for the future time step ?
-            if (projCriterion > self.reproj_cst):
-                self.reprojRate = 1
-
-        # Note FP : is counter really useful? Maybe it should be increased
-        # by the operator that use projection (poisson)?
-        if (ite % self.reprojRate == 0):
-            self.proj_counter += 1
-        self.diagnostics[4] = self.proj_counter
-
-        # Print results, if required
-        # Remark : writer buffer is (pointer) connected to diagnostics
-        if self._writer is not None and self._writer.doWrite(ite):
-            self._writer.write()
-
-    def doProjection(self, ite):
+    @opsetup
+    def setup(self, rwork=None, iwork=None):
+        if not self._is_uptodate:
+            self.discrete_op = RD(self.discreteFields[self.vorticity],
+                                       self.threshold,
+                                       self.frequency, rwork=rwork,
+                                       method=self.method)
+        self._set_io('reprojection', (1, 4))
+        self.discrete_op.setWriter(self._writer)
+        self._is_uptodate = True
+
+    def do_projection(self, ite):
         """
         True if projection must be done
         """
-        return ite % self.reprojRate == 0
+        return self.discrete_op.do_projection(ite)
diff --git a/HySoP/hysop/operator/stretching.py b/HySoP/hysop/operator/stretching.py
index 222a24733535f26d76220f19af4ee6b9a2ced279..0222c57bb908cd5e4cd5b432bf9fa594e98373b4 100755
--- a/HySoP/hysop/operator/stretching.py
+++ b/HySoP/hysop/operator/stretching.py
@@ -9,32 +9,29 @@ from parmepy.constants import debug
 from parmepy.methods_keys import TimeIntegrator, Formulation, \
     SpaceDiscretisation
 from parmepy.numerics.finite_differences import FD_C_4
-from parmepy.operator.continuous import Operator
+from parmepy.operator.computational import Computational
+from parmepy.operator.continuous import opsetup
 from parmepy.operator.discrete.stretching import Conservative, GradUW
+from abc import ABCMeta
 
 
-class Stretching(Operator):
+class Stretching(Computational):
     """
     \todo write latex formulas
     """
+    __metaclass__ = ABCMeta
 
     @debug
     def __init__(self, velocity, vorticity, **kwds):
         """
         Create a Stretching operator from given
         velocity and vorticity variables.
-        'FD_C4 RK3',
-
         @param velocity field
         @param vorticity field
-        @param resolutions : grid resolution of velocity and vorticity
-        @param method : solving method
-        (default = finite differences, 4th order, in space
-        and Runge-Kutta 3 in time.)
-        @param topo : a predefined topology to discretize velocity/vorticity
-        @param ghosts : number of ghosts points. Default depends on the method.
-        Autom. computed if not set.
+        Default solving method : finite differences, 4th order, in space
+        and Runge-Kutta 3 in time.
         """
+        assert 'variables' not in kwds, 'variables parameter is useless.'
         super(Stretching, self).__init__(variables=[velocity, vorticity],
                                          **kwds)
         ## velocity variable (vector)
@@ -60,6 +57,26 @@ class Stretching(Operator):
         self.input = [self.velocity, self.vorticity]
         self.output = [self.vorticity]
 
+    def get_work_properties(self):
+        if not self._is_discretized:
+            msg = 'The operator must be discretized '
+            msg += 'before any call to this function.'
+            raise RuntimeError(msg)
+        #v_ind = self.variables[self.velocity].mesh.iCompute
+        vd = self.discreteFields[self.velocity]
+        shape_v = vd[0][...].shape
+        ti = self.method[TimeIntegrator]
+        rwork_length = ti.getWorkLengths(3)
+        import parmepy.numerics.differential_operations as diff_op
+        if self.formulation is GradUW:
+            rwork_length += diff_op.GradVxW.getWorkLengths()
+        elif self.formulation is Conservative:
+            rwork_length += diff_op.DivWV.getWorkLengths()
+        res = {'rwork': [], 'iwork': None}
+        for _ in xrange(rwork_length):
+            res['rwork'].append(shape_v)
+        return res
+
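+    # Typical use of get_work_properties to provide external work arrays
+    # (a sketch; npw refers to parmepy.tools.numpywrappers):
+    #     wk_p = stretch.get_work_properties()
+    #     rwork = [npw.zeros(shape) for shape in wk_p['rwork']]
+    #     stretch.setup(rwork=rwork)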
     def discretize(self):
         if self.method[SpaceDiscretisation] is FD_C_4:
             nbGhosts = 2
@@ -67,45 +84,13 @@ class Stretching(Operator):
             raise ValueError("Unknown method for space discretization of the\
                 stretching operator.")
 
-        self._standard_discretize(nbGhosts)
-        
-    @staticmethod
-    def getWorkLengths(method=None, domain_dim=None):
-        """
-        Return the length of working arrays lists required
-        for stretching discrete operator, depending on :
-        - the formulation (Conservative or GradUW)
-        - the time integrator (RK3, ...)
-        @param method : the dict of parameters for the operator.
-        Default = {TimeIntegrator: RK3, Formulation: Conservative,
-                      SpaceDiscretisation: FD_C_4}
-        """
-        if method is None:
-            import parmepy.default_methods as default
-            method = default.STRETCHING
-        assert Formulation in method,\
-            'A formulation is required for the stretching.'
-        assert TimeIntegrator in method,\
-            'A time integrator is required for the stretching.'
-        if method[Formulation] == "GradUW":
-            formulation = GradUW
-        else:
-            formulation = Conservative
-        return formulation.getWorkLengths(method[TimeIntegrator])
-
-    def setWorks(self, rwork=None, iwork=None):
-        if rwork is None:
-            rwork = []
-        if iwork is None:
-            iwork = []
-        self.discreteOperator.setWorks(rwork, iwork)
+        super(Stretching, self)._standard_discretize(nbGhosts)
 
     @debug
-    def setUp(self):
-        self.discreteOperator =\
-            self.formulation(self.discreteFields[self.velocity],
-                             self.discreteFields[self.vorticity],
-                             method=self.method)
-
-        self.discreteOperator.setUp()
-        self._isUpToDate = True
+    @opsetup
+    def setup(self, rwork=None, iwork=None):
+        self.discrete_op =\
+            self.formulation(velocity=self.discreteFields[self.velocity],
+                             vorticity=self.discreteFields[self.vorticity],
+                             method=self.method, rwork=rwork, iwork=iwork)
+        self._is_uptodate = True
diff --git a/HySoP/hysop/operator/tests/ref_files/p1/scal2D_.xmf b/HySoP/hysop/operator/tests/ref_files/p1/scal2D_.xmf
new file mode 100644
index 0000000000000000000000000000000000000000..a4550cd4b60b828fda35100cb4da9767921238f6
--- /dev/null
+++ b/HySoP/hysop/operator/tests/ref_files/p1/scal2D_.xmf
@@ -0,0 +1,25 @@
+<?xml version="1.0" ?>
+<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd">
+<Xdmf Version="2.0">
+ <Domain>
+  <Grid Name="CellTime" GridType="Collection" CollectionType="Temporal">
+   <Grid Name="Iteration 000" GridType="Uniform">
+    <Time Value="0.0" />
+    <Topology TopologyType="2DCORECTMesh" NumberOfElements="32  32 "/>
+    <Geometry GeometryType="ORIGIN_DXDY">
+     <DataItem Dimensions="2 " NumberType="Float" Precision="4" Format="XML">
+     -1.0  -1.0
+     </DataItem>
+     <DataItem Dimensions="2 " NumberType="Float" Precision="8" Format="XML">
+     0.0625  0.0625
+     </DataItem>
+    </Geometry>
+    <Attribute Name="scal2D_0_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="32  32 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      scal2D__00000.h5:/scal2D_0_X
+     </DataItem>
+    </Attribute>
+   </Grid>
+  </Grid>
+ </Domain>
+</Xdmf>
diff --git a/HySoP/hysop/operator/tests/ref_files/p1/scal2D__00000.h5 b/HySoP/hysop/operator/tests/ref_files/p1/scal2D__00000.h5
new file mode 100644
index 0000000000000000000000000000000000000000..b5fbadc271d7690af2b42e47e335d39dbcc26cf8
Binary files /dev/null and b/HySoP/hysop/operator/tests/ref_files/p1/scal2D__00000.h5 differ
diff --git a/HySoP/hysop/operator/tests/ref_files/p1/vel2D_.xmf b/HySoP/hysop/operator/tests/ref_files/p1/vel2D_.xmf
new file mode 100644
index 0000000000000000000000000000000000000000..2929b31d1c0106c738472f18f230009019257410
--- /dev/null
+++ b/HySoP/hysop/operator/tests/ref_files/p1/vel2D_.xmf
@@ -0,0 +1,30 @@
+<?xml version="1.0" ?>
+<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd">
+<Xdmf Version="2.0">
+ <Domain>
+  <Grid Name="CellTime" GridType="Collection" CollectionType="Temporal">
+   <Grid Name="Iteration 000" GridType="Uniform">
+    <Time Value="0.0" />
+    <Topology TopologyType="2DCORECTMesh" NumberOfElements="32  32 "/>
+    <Geometry GeometryType="ORIGIN_DXDY">
+     <DataItem Dimensions="2 " NumberType="Float" Precision="4" Format="XML">
+     -1.0  -1.0
+     </DataItem>
+     <DataItem Dimensions="2 " NumberType="Float" Precision="8" Format="XML">
+     0.0625  0.0625
+     </DataItem>
+    </Geometry>
+    <Attribute Name="vel2D_0_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="32  32 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vel2D__00000.h5:/vel2D_0_X
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vel2D_0_Y" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="32  32 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vel2D__00000.h5:/vel2D_0_Y
+     </DataItem>
+    </Attribute>
+   </Grid>
+  </Grid>
+ </Domain>
+</Xdmf>
diff --git a/HySoP/hysop/operator/tests/ref_files/p1/vel2D__00000.h5 b/HySoP/hysop/operator/tests/ref_files/p1/vel2D__00000.h5
new file mode 100644
index 0000000000000000000000000000000000000000..a03bcc76e89c8abba7cb10dea881f3e7eb1a80a3
Binary files /dev/null and b/HySoP/hysop/operator/tests/ref_files/p1/vel2D__00000.h5 differ
diff --git a/HySoP/hysop/operator/tests/ref_files/p1/vel3D_.xmf b/HySoP/hysop/operator/tests/ref_files/p1/vel3D_.xmf
new file mode 100644
index 0000000000000000000000000000000000000000..70ea05cf5bc765835ac45b2695f7c0dee8928557
--- /dev/null
+++ b/HySoP/hysop/operator/tests/ref_files/p1/vel3D_.xmf
@@ -0,0 +1,35 @@
+<?xml version="1.0" ?>
+<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd">
+<Xdmf Version="2.0">
+ <Domain>
+  <Grid Name="CellTime" GridType="Collection" CollectionType="Temporal">
+   <Grid Name="Iteration 000" GridType="Uniform">
+    <Time Value="0.0" />
+    <Topology TopologyType="3DCORECTMesh" NumberOfElements="32  32  32 "/>
+    <Geometry GeometryType="ORIGIN_DXDYDZ">
+     <DataItem Dimensions="3 " NumberType="Float" Precision="4" Format="XML">
+     -1.0  -1.0  -1.0
+     </DataItem>
+     <DataItem Dimensions="3 " NumberType="Float" Precision="8" Format="XML">
+     0.0625  0.0625  0.0625
+     </DataItem>
+    </Geometry>
+    <Attribute Name="vel3D_0_Y" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="32  32  32 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vel3D__00000.h5:/vel3D_0_Y
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vel3D_0_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="32  32  32 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vel3D__00000.h5:/vel3D_0_X
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vel3D_0_Z" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="32  32  32 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vel3D__00000.h5:/vel3D_0_Z
+     </DataItem>
+    </Attribute>
+   </Grid>
+  </Grid>
+ </Domain>
+</Xdmf>
diff --git a/HySoP/hysop/operator/tests/ref_files/p1/vel3D__00000.h5 b/HySoP/hysop/operator/tests/ref_files/p1/vel3D__00000.h5
new file mode 100644
index 0000000000000000000000000000000000000000..f3fb261707aa67bc5df10b275af852828a254435
Binary files /dev/null and b/HySoP/hysop/operator/tests/ref_files/p1/vel3D__00000.h5 differ
diff --git a/HySoP/hysop/operator/tests/ref_files/p2/scal2D_.xmf b/HySoP/hysop/operator/tests/ref_files/p2/scal2D_.xmf
new file mode 100644
index 0000000000000000000000000000000000000000..a4550cd4b60b828fda35100cb4da9767921238f6
--- /dev/null
+++ b/HySoP/hysop/operator/tests/ref_files/p2/scal2D_.xmf
@@ -0,0 +1,25 @@
+<?xml version="1.0" ?>
+<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd">
+<Xdmf Version="2.0">
+ <Domain>
+  <Grid Name="CellTime" GridType="Collection" CollectionType="Temporal">
+   <Grid Name="Iteration 000" GridType="Uniform">
+    <Time Value="0.0" />
+    <Topology TopologyType="2DCORECTMesh" NumberOfElements="32  32 "/>
+    <Geometry GeometryType="ORIGIN_DXDY">
+     <DataItem Dimensions="2 " NumberType="Float" Precision="4" Format="XML">
+     -1.0  -1.0
+     </DataItem>
+     <DataItem Dimensions="2 " NumberType="Float" Precision="8" Format="XML">
+     0.0625  0.0625
+     </DataItem>
+    </Geometry>
+    <Attribute Name="scal2D_0_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="32  32 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      scal2D__00000.h5:/scal2D_0_X
+     </DataItem>
+    </Attribute>
+   </Grid>
+  </Grid>
+ </Domain>
+</Xdmf>
diff --git a/HySoP/hysop/operator/tests/ref_files/p2/scal2D__00000.h5 b/HySoP/hysop/operator/tests/ref_files/p2/scal2D__00000.h5
new file mode 100644
index 0000000000000000000000000000000000000000..f9122a9b8f79b82e2a80c697386794c9d163b1a3
Binary files /dev/null and b/HySoP/hysop/operator/tests/ref_files/p2/scal2D__00000.h5 differ
diff --git a/HySoP/hysop/operator/tests/ref_files/p2/vel2D_.xmf b/HySoP/hysop/operator/tests/ref_files/p2/vel2D_.xmf
new file mode 100644
index 0000000000000000000000000000000000000000..2929b31d1c0106c738472f18f230009019257410
--- /dev/null
+++ b/HySoP/hysop/operator/tests/ref_files/p2/vel2D_.xmf
@@ -0,0 +1,30 @@
+<?xml version="1.0" ?>
+<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd">
+<Xdmf Version="2.0">
+ <Domain>
+  <Grid Name="CellTime" GridType="Collection" CollectionType="Temporal">
+   <Grid Name="Iteration 000" GridType="Uniform">
+    <Time Value="0.0" />
+    <Topology TopologyType="2DCORECTMesh" NumberOfElements="32  32 "/>
+    <Geometry GeometryType="ORIGIN_DXDY">
+     <DataItem Dimensions="2 " NumberType="Float" Precision="4" Format="XML">
+     -1.0  -1.0
+     </DataItem>
+     <DataItem Dimensions="2 " NumberType="Float" Precision="8" Format="XML">
+     0.0625  0.0625
+     </DataItem>
+    </Geometry>
+    <Attribute Name="vel2D_0_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="32  32 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vel2D__00000.h5:/vel2D_0_X
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vel2D_0_Y" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="32  32 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vel2D__00000.h5:/vel2D_0_Y
+     </DataItem>
+    </Attribute>
+   </Grid>
+  </Grid>
+ </Domain>
+</Xdmf>
diff --git a/HySoP/hysop/operator/tests/ref_files/p2/vel2D__00000.h5 b/HySoP/hysop/operator/tests/ref_files/p2/vel2D__00000.h5
new file mode 100644
index 0000000000000000000000000000000000000000..f75363cac458b84a0ce857e1865e7c4bf3297aa0
Binary files /dev/null and b/HySoP/hysop/operator/tests/ref_files/p2/vel2D__00000.h5 differ
diff --git a/HySoP/hysop/operator/tests/ref_files/p2/vel3D_.xmf b/HySoP/hysop/operator/tests/ref_files/p2/vel3D_.xmf
new file mode 100644
index 0000000000000000000000000000000000000000..70ea05cf5bc765835ac45b2695f7c0dee8928557
--- /dev/null
+++ b/HySoP/hysop/operator/tests/ref_files/p2/vel3D_.xmf
@@ -0,0 +1,35 @@
+<?xml version="1.0" ?>
+<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd">
+<Xdmf Version="2.0">
+ <Domain>
+  <Grid Name="CellTime" GridType="Collection" CollectionType="Temporal">
+   <Grid Name="Iteration 000" GridType="Uniform">
+    <Time Value="0.0" />
+    <Topology TopologyType="3DCORECTMesh" NumberOfElements="32  32  32 "/>
+    <Geometry GeometryType="ORIGIN_DXDYDZ">
+     <DataItem Dimensions="3 " NumberType="Float" Precision="4" Format="XML">
+     -1.0  -1.0  -1.0
+     </DataItem>
+     <DataItem Dimensions="3 " NumberType="Float" Precision="8" Format="XML">
+     0.0625  0.0625  0.0625
+     </DataItem>
+    </Geometry>
+    <Attribute Name="vel3D_0_Y" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="32  32  32 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vel3D__00000.h5:/vel3D_0_Y
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vel3D_0_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="32  32  32 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vel3D__00000.h5:/vel3D_0_X
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vel3D_0_Z" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="32  32  32 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vel3D__00000.h5:/vel3D_0_Z
+     </DataItem>
+    </Attribute>
+   </Grid>
+  </Grid>
+ </Domain>
+</Xdmf>
diff --git a/HySoP/hysop/operator/tests/ref_files/p2/vel3D__00000.h5 b/HySoP/hysop/operator/tests/ref_files/p2/vel3D__00000.h5
new file mode 100644
index 0000000000000000000000000000000000000000..e85565de98a6f9211182e1f3e358633ef3d6e93e
Binary files /dev/null and b/HySoP/hysop/operator/tests/ref_files/p2/vel3D__00000.h5 differ
diff --git a/HySoP/hysop/operator/tests/ref_files/p4/scal2D_.xmf b/HySoP/hysop/operator/tests/ref_files/p4/scal2D_.xmf
new file mode 100644
index 0000000000000000000000000000000000000000..a4550cd4b60b828fda35100cb4da9767921238f6
--- /dev/null
+++ b/HySoP/hysop/operator/tests/ref_files/p4/scal2D_.xmf
@@ -0,0 +1,25 @@
+<?xml version="1.0" ?>
+<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd">
+<Xdmf Version="2.0">
+ <Domain>
+  <Grid Name="CellTime" GridType="Collection" CollectionType="Temporal">
+   <Grid Name="Iteration 000" GridType="Uniform">
+    <Time Value="0.0" />
+    <Topology TopologyType="2DCORECTMesh" NumberOfElements="32  32 "/>
+    <Geometry GeometryType="ORIGIN_DXDY">
+     <DataItem Dimensions="2 " NumberType="Float" Precision="4" Format="XML">
+     -1.0  -1.0
+     </DataItem>
+     <DataItem Dimensions="2 " NumberType="Float" Precision="8" Format="XML">
+     0.0625  0.0625
+     </DataItem>
+    </Geometry>
+    <Attribute Name="scal2D_0_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="32  32 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      scal2D__00000.h5:/scal2D_0_X
+     </DataItem>
+    </Attribute>
+   </Grid>
+  </Grid>
+ </Domain>
+</Xdmf>
diff --git a/HySoP/hysop/operator/tests/ref_files/p4/scal2D__00000.h5 b/HySoP/hysop/operator/tests/ref_files/p4/scal2D__00000.h5
new file mode 100644
index 0000000000000000000000000000000000000000..44d6a0c80e902e79ec8099c9e9d39e9a4280b894
Binary files /dev/null and b/HySoP/hysop/operator/tests/ref_files/p4/scal2D__00000.h5 differ
diff --git a/HySoP/hysop/operator/tests/ref_files/p4/vel2D_.xmf b/HySoP/hysop/operator/tests/ref_files/p4/vel2D_.xmf
new file mode 100644
index 0000000000000000000000000000000000000000..2929b31d1c0106c738472f18f230009019257410
--- /dev/null
+++ b/HySoP/hysop/operator/tests/ref_files/p4/vel2D_.xmf
@@ -0,0 +1,30 @@
+<?xml version="1.0" ?>
+<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd">
+<Xdmf Version="2.0">
+ <Domain>
+  <Grid Name="CellTime" GridType="Collection" CollectionType="Temporal">
+   <Grid Name="Iteration 000" GridType="Uniform">
+    <Time Value="0.0" />
+    <Topology TopologyType="2DCORECTMesh" NumberOfElements="32  32 "/>
+    <Geometry GeometryType="ORIGIN_DXDY">
+     <DataItem Dimensions="2 " NumberType="Float" Precision="4" Format="XML">
+     -1.0  -1.0
+     </DataItem>
+     <DataItem Dimensions="2 " NumberType="Float" Precision="8" Format="XML">
+     0.0625  0.0625
+     </DataItem>
+    </Geometry>
+    <Attribute Name="vel2D_0_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="32  32 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vel2D__00000.h5:/vel2D_0_X
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vel2D_0_Y" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="32  32 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vel2D__00000.h5:/vel2D_0_Y
+     </DataItem>
+    </Attribute>
+   </Grid>
+  </Grid>
+ </Domain>
+</Xdmf>
diff --git a/HySoP/hysop/operator/tests/ref_files/p4/vel2D__00000.h5 b/HySoP/hysop/operator/tests/ref_files/p4/vel2D__00000.h5
new file mode 100644
index 0000000000000000000000000000000000000000..feef864a994be9d6f64d4b2456976230bdc03586
Binary files /dev/null and b/HySoP/hysop/operator/tests/ref_files/p4/vel2D__00000.h5 differ
diff --git a/HySoP/hysop/operator/tests/ref_files/p4/vel3D_.xmf b/HySoP/hysop/operator/tests/ref_files/p4/vel3D_.xmf
new file mode 100644
index 0000000000000000000000000000000000000000..70ea05cf5bc765835ac45b2695f7c0dee8928557
--- /dev/null
+++ b/HySoP/hysop/operator/tests/ref_files/p4/vel3D_.xmf
@@ -0,0 +1,35 @@
+<?xml version="1.0" ?>
+<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd">
+<Xdmf Version="2.0">
+ <Domain>
+  <Grid Name="CellTime" GridType="Collection" CollectionType="Temporal">
+   <Grid Name="Iteration 000" GridType="Uniform">
+    <Time Value="0.0" />
+    <Topology TopologyType="3DCORECTMesh" NumberOfElements="32  32  32 "/>
+    <Geometry GeometryType="ORIGIN_DXDYDZ">
+     <DataItem Dimensions="3 " NumberType="Float" Precision="4" Format="XML">
+     -1.0  -1.0  -1.0
+     </DataItem>
+     <DataItem Dimensions="3 " NumberType="Float" Precision="8" Format="XML">
+     0.0625  0.0625  0.0625
+     </DataItem>
+    </Geometry>
+    <Attribute Name="vel3D_0_Y" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="32  32  32 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vel3D__00000.h5:/vel3D_0_Y
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vel3D_0_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="32  32  32 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vel3D__00000.h5:/vel3D_0_X
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vel3D_0_Z" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="32  32  32 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vel3D__00000.h5:/vel3D_0_Z
+     </DataItem>
+    </Attribute>
+   </Grid>
+  </Grid>
+ </Domain>
+</Xdmf>
diff --git a/HySoP/hysop/operator/tests/ref_files/p4/vel3D__00000.h5 b/HySoP/hysop/operator/tests/ref_files/p4/vel3D__00000.h5
new file mode 100644
index 0000000000000000000000000000000000000000..d60a0e2a7028e65a04ccaa2307dca2302d23d059
Binary files /dev/null and b/HySoP/hysop/operator/tests/ref_files/p4/vel3D__00000.h5 differ
diff --git a/HySoP/hysop/operator/tests/ref_files/p8/scal2D_.xmf b/HySoP/hysop/operator/tests/ref_files/p8/scal2D_.xmf
new file mode 100644
index 0000000000000000000000000000000000000000..a4550cd4b60b828fda35100cb4da9767921238f6
--- /dev/null
+++ b/HySoP/hysop/operator/tests/ref_files/p8/scal2D_.xmf
@@ -0,0 +1,25 @@
+<?xml version="1.0" ?>
+<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd">
+<Xdmf Version="2.0">
+ <Domain>
+  <Grid Name="CellTime" GridType="Collection" CollectionType="Temporal">
+   <Grid Name="Iteration 000" GridType="Uniform">
+    <Time Value="0.0" />
+    <Topology TopologyType="2DCORECTMesh" NumberOfElements="32  32 "/>
+    <Geometry GeometryType="ORIGIN_DXDY">
+     <DataItem Dimensions="2 " NumberType="Float" Precision="4" Format="XML">
+     -1.0  -1.0
+     </DataItem>
+     <DataItem Dimensions="2 " NumberType="Float" Precision="8" Format="XML">
+     0.0625  0.0625
+     </DataItem>
+    </Geometry>
+    <Attribute Name="scal2D_0_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="32  32 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      scal2D__00000.h5:/scal2D_0_X
+     </DataItem>
+    </Attribute>
+   </Grid>
+  </Grid>
+ </Domain>
+</Xdmf>
diff --git a/HySoP/hysop/operator/tests/ref_files/p8/scal2D__00000.h5 b/HySoP/hysop/operator/tests/ref_files/p8/scal2D__00000.h5
new file mode 100644
index 0000000000000000000000000000000000000000..d70553038a04d02ba3310e5baaf9ae89967fbf86
Binary files /dev/null and b/HySoP/hysop/operator/tests/ref_files/p8/scal2D__00000.h5 differ
diff --git a/HySoP/hysop/operator/tests/ref_files/p8/vel2D_.xmf b/HySoP/hysop/operator/tests/ref_files/p8/vel2D_.xmf
new file mode 100644
index 0000000000000000000000000000000000000000..2929b31d1c0106c738472f18f230009019257410
--- /dev/null
+++ b/HySoP/hysop/operator/tests/ref_files/p8/vel2D_.xmf
@@ -0,0 +1,30 @@
+<?xml version="1.0" ?>
+<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd">
+<Xdmf Version="2.0">
+ <Domain>
+  <Grid Name="CellTime" GridType="Collection" CollectionType="Temporal">
+   <Grid Name="Iteration 000" GridType="Uniform">
+    <Time Value="0.0" />
+    <Topology TopologyType="2DCORECTMesh" NumberOfElements="32  32 "/>
+    <Geometry GeometryType="ORIGIN_DXDY">
+     <DataItem Dimensions="2 " NumberType="Float" Precision="4" Format="XML">
+     -1.0  -1.0
+     </DataItem>
+     <DataItem Dimensions="2 " NumberType="Float" Precision="8" Format="XML">
+     0.0625  0.0625
+     </DataItem>
+    </Geometry>
+    <Attribute Name="vel2D_0_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="32  32 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vel2D__00000.h5:/vel2D_0_X
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vel2D_0_Y" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="32  32 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vel2D__00000.h5:/vel2D_0_Y
+     </DataItem>
+    </Attribute>
+   </Grid>
+  </Grid>
+ </Domain>
+</Xdmf>
diff --git a/HySoP/hysop/operator/tests/ref_files/p8/vel2D__00000.h5 b/HySoP/hysop/operator/tests/ref_files/p8/vel2D__00000.h5
new file mode 100644
index 0000000000000000000000000000000000000000..bdbb3395082b80bb72a465a9b724d57476f30b8e
Binary files /dev/null and b/HySoP/hysop/operator/tests/ref_files/p8/vel2D__00000.h5 differ
diff --git a/HySoP/hysop/operator/tests/ref_files/p8/vel3D_.xmf b/HySoP/hysop/operator/tests/ref_files/p8/vel3D_.xmf
new file mode 100644
index 0000000000000000000000000000000000000000..70ea05cf5bc765835ac45b2695f7c0dee8928557
--- /dev/null
+++ b/HySoP/hysop/operator/tests/ref_files/p8/vel3D_.xmf
@@ -0,0 +1,35 @@
+<?xml version="1.0" ?>
+<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd">
+<Xdmf Version="2.0">
+ <Domain>
+  <Grid Name="CellTime" GridType="Collection" CollectionType="Temporal">
+   <Grid Name="Iteration 000" GridType="Uniform">
+    <Time Value="0.0" />
+    <Topology TopologyType="3DCORECTMesh" NumberOfElements="32  32  32 "/>
+    <Geometry GeometryType="ORIGIN_DXDYDZ">
+     <DataItem Dimensions="3 " NumberType="Float" Precision="4" Format="XML">
+     -1.0  -1.0  -1.0
+     </DataItem>
+     <DataItem Dimensions="3 " NumberType="Float" Precision="8" Format="XML">
+     0.0625  0.0625  0.0625
+     </DataItem>
+    </Geometry>
+    <Attribute Name="vel3D_0_Y" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="32  32  32 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vel3D__00000.h5:/vel3D_0_Y
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vel3D_0_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="32  32  32 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vel3D__00000.h5:/vel3D_0_X
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vel3D_0_Z" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="32  32  32 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vel3D__00000.h5:/vel3D_0_Z
+     </DataItem>
+    </Attribute>
+   </Grid>
+  </Grid>
+ </Domain>
+</Xdmf>
diff --git a/HySoP/hysop/operator/tests/ref_files/p8/vel3D__00000.h5 b/HySoP/hysop/operator/tests/ref_files/p8/vel3D__00000.h5
new file mode 100644
index 0000000000000000000000000000000000000000..f248e4dd5196c14f42e9e5dca936fa8a851e1ef0
Binary files /dev/null and b/HySoP/hysop/operator/tests/ref_files/p8/vel3D__00000.h5 differ
diff --git a/HySoP/hysop/operator/tests/test_Stretching.py b/HySoP/hysop/operator/tests/test_Stretching.py
index 95b4b4d9656ed9febfa6f1a7a8db35372a7ef92f..497b4138d2c8d8a4a0c973271dadfeed9ad5f976 100755
--- a/HySoP/hysop/operator/tests/test_Stretching.py
+++ b/HySoP/hysop/operator/tests/test_Stretching.py
@@ -4,65 +4,69 @@ import numpy as np
 from parmepy.fields.continuous import Field
 from parmepy.operator.stretching import Stretching
 from parmepy.problem.simulation import Simulation
-from parmepy.methods_keys import TimeIntegrator, Formulation,\
-    SpaceDiscretisation
-from parmepy.methods import RK3, FD_C_4, Conservative
+#from parmepy.methods_keys import TimeIntegrator, Formulation,\
+#    SpaceDiscretisation
+#from parmepy.methods import RK3, FD_C_4, Conservative
+from parmepy.tools.parameters import Discretization
+import parmepy.tools.numpywrappers as npw
+pi = np.pi
+cos = np.cos
+sin = np.sin
 
 
 def computeVel(res, x, y, z, t):
-    amodul = np.cos(np.pi * 1. / 3.)
-    pix = np.pi * x
-    piy = np.pi * y
-    piz = np.pi * z
+    amodul = cos(pi * 1. / 3.)
+    pix = pi * x
+    piy = pi * y
+    piz = pi * z
     pi2x = 2. * pix
     pi2y = 2. * piy
     pi2z = 2. * piz
-    res[0][...] = 2. * np.sin(pix) * np.sin(pix) \
-        * np.sin(pi2y) * np.sin(pi2z) * amodul
-    res[1][...] = - np.sin(pi2x) * np.sin(piy) \
-        * np.sin(piy) * np.sin(pi2z) * amodul
-    res[2][...] = - np.sin(pi2x) * np.sin(piz) \
-        * np.sin(piz) * np.sin(pi2y) * amodul
+    res[0][...] = 2. * sin(pix) * sin(pix) \
+        * sin(pi2y) * sin(pi2z) * amodul
+    res[1][...] = - sin(pi2x) * sin(piy) \
+        * sin(piy) * sin(pi2z) * amodul
+    res[2][...] = - sin(pi2x) * sin(piz) \
+        * sin(piz) * sin(pi2y) * amodul
     return res
 
 
 def computeVort(res, x, y, z, t):
-    amodul = np.cos(np.pi * 1. / 3.)
-    pix = np.pi * x
-    piy = np.pi * y
-    piz = np.pi * z
+    amodul = cos(pi * 1. / 3.)
+    pix = pi * x
+    piy = pi * y
+    piz = pi * z
     pi2x = 2. * pix
     pi2y = 2. * piy
     pi2z = 2. * piz
-    res[0][...] = 2. * np.pi * np.sin(pi2x) * amodul *\
-        (- np.cos(pi2y) * np.sin(piz) * np.sin(piz)
-         + np.sin(piy) * np.sin(piy) * np.cos(pi2z))
+    res[0][...] = 2. * pi * sin(pi2x) * amodul *\
+        (- cos(pi2y) * sin(piz) * sin(piz)
+         + sin(piy) * sin(piy) * cos(pi2z))
 
-    res[1][...] = 2. * np.pi * np.sin(pi2y) * amodul *\
-        (2. * np.cos(pi2z) * np.sin(pix) * np.sin(pix)
-         + np.sin(piz) * np.sin(piz) * np.cos(pi2x))
+    res[1][...] = 2. * pi * sin(pi2y) * amodul *\
+        (2. * cos(pi2z) * sin(pix) * sin(pix)
+         + sin(piz) * sin(piz) * cos(pi2x))
 
-    res[2][...] = -2. * np.pi * np.sin(pi2z) * amodul *\
-        (np.cos(pi2x) * np.sin(piy) * np.sin(piy)
-         + np.sin(pix) * np.sin(pix) * np.cos(pi2y))
+    res[2][...] = -2. * pi * sin(pi2z) * amodul *\
+        (cos(pi2x) * sin(piy) * sin(piy)
+         + sin(pix) * sin(pix) * cos(pi2y))
 
     return res
 
 
-def test_Stretching():
+def test_stretching():
     # Parameters
     nb = 33
-    dim = 3
     boxLength = [1., 1., 1.]
     boxMin = [0., 0., 0.]
-    nbElem = [nb, nb, nb]
-
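+    # Discretization couples the grid resolution with a number of ghost
+    # points per direction; the 2 ghost layers are presumably what the
+    # default 4th-order centered stencil (FD_C_4) of the stretching
+    # operator requires.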
+    nbElem = Discretization([nb, nb, nb], [2, 2, 2])
     timeStep = 0.05
 
-    ## Domain
-    box = pp.Box(dim, length=boxLength, origin=boxMin)
+    # Domain
+    box = pp.Box(length=boxLength, origin=boxMin)
 
-    ## Fields
+    # Fields
     velo = Field(
         domain=box, formula=computeVel,
         name='Velocity', isVector=True)
@@ -70,21 +74,57 @@ def test_Stretching():
         domain=box, formula=computeVort,
         name='Vorticity', isVector=True)
 
-    ## Operators
-    method = {TimeIntegrator: RK3, Formulation: Conservative,
-              SpaceDiscretisation: FD_C_4}
-    stretch = Stretching(velo, vorti,
-                         resolutions={velo: nbElem,
-                                      vorti: nbElem},
-                         method=method)
+    # Operators
+    #method = {TimeIntegrator: RK3, Formulation: Conservative,
+    #          SpaceDiscretisation: FD_C_4}
+    stretch = Stretching(velo, vorti, discretization=nbElem)
     stretch.discretize()
     topo = stretch.discreteFields[velo].topology
     velo.initialize(topo=topo)
     vorti.initialize(topo=topo)
-    stretch.setUp()
-    simulation = Simulation(tinit=0, tend=20, timeStep=timeStep)
+    stretch.setup()
+    simulation = Simulation(tinit=0, tend=1., timeStep=timeStep)
     stretch.apply(simulation)
 
 
+def test_stretching_external_work():
+    # Parameters
+    nb = 33
+    boxLength = [1., 1., 1.]
+    boxMin = [0., 0., 0.]
+    nbElem = Discretization([nb, nb, nb], [2, 2, 2])
+    timeStep = 0.05
+
+    # Domain
+    box = pp.Box(length=boxLength, origin=boxMin)
+
+    # Fields
+    velo = Field(
+        domain=box, formula=computeVel,
+        name='Velocity', isVector=True)
+    vorti = Field(
+        domain=box, formula=computeVort,
+        name='Vorticity', isVector=True)
+
+    # Operators
+    #method = {TimeIntegrator: RK3, Formulation: Conservative,
+    #          SpaceDiscretisation: FD_C_4}
+    stretch = Stretching(velo, vorti, discretization=nbElem)
+    stretch.discretize()
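+    # Allocate the operator's work arrays on the caller side:
+    # get_work_properties() is assumed to return a dict whose 'rwork'
+    # entry lists the shape of each real work array the operator expects.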
+    wk_p = stretch.get_work_properties()
+    rwork = []
+    wk_length = len(wk_p['rwork'])
+    for i in xrange(wk_length):
+        memshape = wk_p['rwork'][i]
+        rwork.append(npw.zeros(memshape))
+
+    topo = stretch.discreteFields[velo].topology
+    velo.initialize(topo=topo)
+    vorti.initialize(topo=topo)
+    stretch.setup(rwork=rwork)
+    simulation = Simulation(tinit=0, tend=1., timeStep=timeStep)
+    stretch.apply(simulation)
+
+
 if __name__ == "__main__":
-    test_Stretching()
+    test_stretching()
+    test_stretching_external_work()
diff --git a/HySoP/hysop/operator/tests/test_adaptive_time_step.py b/HySoP/hysop/operator/tests/test_adaptive_time_step.py
new file mode 100644
index 0000000000000000000000000000000000000000..63c5df60c102df1f994d7f4ee39147203549f4be
--- /dev/null
+++ b/HySoP/hysop/operator/tests/test_adaptive_time_step.py
@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+import parmepy as pp
+from parmepy.operator.adapt_timestep import AdaptTimeStep
+from parmepy.problem.simulation import Simulation
+from parmepy.tools.parameters import Discretization
+from parmepy import VariableParameter, Field
+import numpy as np
+import parmepy.tools.numpywrappers as npw
+import os
+sin = np.sin
+cos = np.cos
+
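+# 33 points and 2 ghost layers per direction; the ghosts are presumably
+# required by the finite-difference evaluation of the velocity gradient.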
+d3d = Discretization([33, 33, 33], [2, 2, 2])
+
+
+def computeVel(res, x, y, z, t):
+    res[0][...] = sin(x * t) * cos(y) * cos(z)
+    res[1][...] = - cos(x) * sin(y) * cos(z)
+    res[2][...] = 0.
+    return res
+
+
+def computeVort(res, x, y, z, t):
+    res[0][...] = 0.
+    res[1][...] = 0.
+    res[2][...] = sin(x) * cos(y * t) * cos(z)
+    return res
+
+
+def init():
+    box = pp.Box(length=[2.] * 3, origin=[0.0] * 3)
+    velo = Field(domain=box, formula=computeVel,
+                 name='Velocity', isVector=True)
+    vorti = Field(domain=box, formula=computeVort,
+                  name='Vorticity', isVector=True)
+    return velo, vorti
+
+
+def test_adapt():
+    """
+    TODO: write proper tests.
+    For now we only check that the discretize/setup/apply sequence runs
+    without error.
+    """
+    velo, vorti = init()
+    dt = VariableParameter(data=0.0125, name='dt')
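+    # dt is wrapped in a VariableParameter so the adapted value can be
+    # shared with other operators; lcfl and cfl are the Lagrangian and
+    # convective CFL bounds constraining the new time step.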
+    op = AdaptTimeStep(velo, vorti, dt_adapt=dt,
+                       discretization=d3d, lcfl=0.125, cfl=0.5)
+    op.discretize()
+    op.setup()
+    simu = Simulation(nbIter=2)
+    op.apply(simu)
+
+
+def test_adapt_2():
+    """
+    Same as test_adapt, but with file output enabled (io_params=True).
+    """
+    velo, vorti = init()
+    dt = VariableParameter(data=0.0125, name='dt')
+    op = AdaptTimeStep(velo, vorti, dt_adapt=dt, io_params=True,
+                       discretization=d3d, lcfl=0.125, cfl=0.5)
+    op.discretize()
+    op.setup()
+    simu = Simulation(nbIter=2)
+    op.apply(simu)
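+    # io_params=True is expected to create a default output file recording
+    # the adapted time step; we only check that the file exists.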
+    filename = op.io_params.filename
+    assert os.path.exists(filename)
+
+
+def test_adapt_3():
+    """
+    Same as test_adapt, but with externally allocated work arrays.
+    """
+    velo, vorti = init()
+    dt = VariableParameter(data=0.0125, name='dt')
+    op = AdaptTimeStep(velo, vorti, dt_adapt=dt, io_params=True,
+                       discretization=d3d, lcfl=0.125, cfl=0.5)
+    op.discretize()
+    wk_p = op.get_work_properties()
+    rwork = []
+    wk_length = len(wk_p['rwork'])
+    for i in xrange(wk_length):
+        memshape = wk_p['rwork'][i]
+        rwork.append(npw.zeros(memshape))
+
+    op.setup(rwork=rwork)
+    simu = Simulation(nbIter=2)
+    op.apply(simu)
+    filename = op.io_params.filename
+    assert os.path.exists(filename)
+
+
+if __name__ == "__main__":
+    test_adapt()
+    test_adapt_2()
+    test_adapt_3()
diff --git a/HySoP/hysop/operator/tests/test_advec_scales.py b/HySoP/hysop/operator/tests/test_advec_scales.py
index cee3827b876e60465bfcbf5e5f65d5f3848bcfc7..f6c81dc029956659eaee451b90729918e05d1f99 100755
--- a/HySoP/hysop/operator/tests/test_advec_scales.py
+++ b/HySoP/hysop/operator/tests/test_advec_scales.py
@@ -12,23 +12,20 @@ from parmepy.operator.advection import Advection
 from parmepy.problem.simulation import Simulation
 import parmepy.tools.numpywrappers as npw
 
+from parmepy.tools.parameters import Discretization
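+# A single 17^3 discretization (no ghost points), shared by all tests below.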
+d3d = Discretization([17, 17, 17])
+
 
 def test_nullVelocity_m4():
     """Basic test with random velocity. Using M4prime"""
-    box = Box(3, length=[1., 1., 1.], origin=[0., 0., 0.])
+    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
     scal = Field(domain=box, name='Scalar')
     scal_ref = Field(domain=box, name='Scalar_ref')
     velo = Field(domain=box, name='Velocity',
                  formula=lambda x, y, z, t: (0., 0., 0.), isVector=True,
                  doVectorize=True)
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
-                      method={Scales: 'p_M4'}
-                      )
-    advec_py = Advection(velo, scal_ref,
-                         resolutions={velo: [17, 17, 17],
-                                      scal_ref: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d, method={Scales: 'p_M4'})
+    advec_py = Advection(velo, scal_ref, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L2_1,
@@ -37,16 +34,16 @@ def test_nullVelocity_m4():
                          )
     advec.discretize()
     advec_py.discretize()
-    advec.setUp()
-    advec_py.setUp()
+    advec.setup()
+    advec_py.setup()
     scal_d = scal.discreteFields.values()[0]
     scal_ref_d = scal_ref.discreteFields.values()[0]
 
-    scal_d.data[0][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER)
+    scal_d.data[0][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape))
     scal_ref_d.data[0][...] = scal_d.data[0][...]
-    assert (velo.norm() == 0).all()
+    topo = scal_d.topology
+    assert (velo.norm(topo) == 0).all()
     advec.apply(Simulation(tinit=0., tend=0.1, nbIter=1))
     advec_py.apply(Simulation(tinit=0., tend=0.1, nbIter=1))
     print (np.max(np.abs(scal_ref_d.data[0] - scal_d.data[0])))
@@ -55,20 +52,14 @@ def test_nullVelocity_m4():
 
 def test_nullVelocity_vec_m4():
     """Basic test with random velocity and vector field. Using M4prime"""
-    box = Box(3, length=[1., 1., 1.], origin=[0., 0., 0.])
+    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
     scal = Field(domain=box, name='Scalar', isVector=True)
     scal_ref = Field(domain=box, name='Scalar_ref', isVector=True)
     velo = Field(domain=box, name='Velocity',
                  formula=lambda x, y, z, t: (0., 0., 0.), isVector=True,
                  doVectorize=True)
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
-                      method={Scales: 'p_M4'}
-                      )
-    advec_py = Advection(velo, scal_ref,
-                         resolutions={velo: [17, 17, 17],
-                                      scal_ref: [17, 17, 17]},
+    advec = Advection(velo, scal, discretization=d3d, method={Scales: 'p_M4'})
+    advec_py = Advection(velo, scal_ref, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L2_1,
@@ -77,21 +68,18 @@ def test_nullVelocity_vec_m4():
                          )
     advec.discretize()
     advec_py.discretize()
-    advec.setUp()
-    advec_py.setUp()
+    advec.setup()
+    advec_py.setup()
 
     scal_d = scal.discreteFields.values()[0]
     scal_ref_d = scal_ref.discreteFields.values()[0]
 
-    scal_d.data[0][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER)
-    scal_d.data[1][...] = np.asarray(
-        np.random.random(scal_d.data[1].shape),
-        dtype=PARMES_REAL, order=ORDER)
-    scal_d.data[2][...] = np.asarray(
-        np.random.random(scal_d.data[2].shape),
-        dtype=PARMES_REAL, order=ORDER)
+    scal_d.data[0][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape))
+    scal_d.data[1][...] = npw.asrealarray(
+        np.random.random(scal_d.data[1].shape))
+    scal_d.data[2][...] = npw.asrealarray(
+        np.random.random(scal_d.data[2].shape))
     scal_ref_d.data[0][...] = scal_d.data[0][...]
     scal_ref_d.data[1][...] = scal_d.data[1][...]
     scal_ref_d.data[2][...] = scal_d.data[2][...]
@@ -106,22 +94,18 @@ def test_nullVelocity_vec_m4():
 
 def test_nullVelocity_m6():
     """Basic test with null velocity. Using M6prime"""
-    box = Box(3, length=[1., 1., 1.], origin=[0., 0., 0.])
+    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
     scal = Field(domain=box, name='Scalar')
     velo = Field(domain=box, name='Velocity',
                  formula=lambda x, y, z, t: (0., 0., 0.), isVector=True,
                  doVectorize=True)
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
-                      method={Scales: 'p_M6'}
-                      )
+    advec = Advection(velo, scal, discretization=d3d, method={Scales: 'p_M6'})
     advec.discretize()
-    advec.setUp()
+    advec.setup()
 
     scal_d = scal.discreteFields.values()[0]
-    scal_d.data[0][...] = np.asarray(np.random.random(scal_d.data[0].shape),
-                                     dtype=PARMES_REAL, order=ORDER)
+    scal_d.data[0][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape))
     scal_init = npw.copy(scal_d.data[0])
 
     advec.apply(Simulation(tinit=0., tend=0.1, nbIter=1))
@@ -131,29 +115,23 @@ def test_nullVelocity_m6():
 
 def test_nullVelocity_vec_m6():
     """Basic test with null velocity and vector field. Using M6prime"""
-    box = Box(3, length=[1., 1., 1.], origin=[0., 0., 0.])
+    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
     scal = Field(domain=box, name='Scalar', isVector=True)
     velo = Field(domain=box, name='Velocity',
                  formula=lambda x, y, z, t: (0., 0., 0.), isVector=True,
                  doVectorize=True)
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
-                      method={Scales: 'p_M6'}
-                      )
+    advec = Advection(velo, scal, discretization=d3d, method={Scales: 'p_M6'})
     advec.discretize()
-    advec.setUp()
+    advec.setup()
 
     scal_d = scal.discreteFields.values()[0]
-    scal_d.data[0][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER)
-    scal_d.data[1][...] = np.asarray(
-        np.random.random(scal_d.data[1].shape),
-        dtype=PARMES_REAL, order=ORDER)
-    scal_d.data[2][...] = np.asarray(
-        np.random.random(scal_d.data[2].shape),
-        dtype=PARMES_REAL, order=ORDER)
+    scal_d.data[0][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape))
+    scal_d.data[1][...] = npw.asrealarray(
+        np.random.random(scal_d.data[1].shape))
+    scal_d.data[2][...] = npw.asrealarray(
+        np.random.random(scal_d.data[2].shape))
     scal_init0 = npw.copy(scal_d.data[0])
     scal_init1 = npw.copy(scal_d.data[1])
     scal_init2 = npw.copy(scal_d.data[2])
@@ -167,21 +145,15 @@ def test_nullVelocity_vec_m6():
 
 def test_nullVelocity_m8():
     """Basic test with random velocity. Using M4prime"""
-    box = Box(3, length=[1., 1., 1.], origin=[0., 0., 0.])
+    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
     scal = Field(domain=box, name='Scalar')
     scal_ref = Field(domain=box, name='Scalar_ref')
     velo = Field(domain=box, name='Velocity',
                  formula=lambda x, y, z, t: (0., 0., 0.), isVector=True,
                  doVectorize=True)
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
-                      method={Scales: 'p_M8'}
-                      )
+    advec = Advection(velo, scal, discretization=d3d, method={Scales: 'p_M8'})
-    advec_py = Advection(velo, scal_ref,
-                         resolutions={velo: [17, 17, 17],
-                                      scal_ref: [17, 17, 17]},
-                         method={TimeIntegrator: RK2,
+    advec_py = Advection(velo, scal_ref, discretization=d3d,
+                         method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: M8Prime,
                                  Splitting: 'o2_FullHalf',
@@ -189,15 +161,14 @@ def test_nullVelocity_m8():
                          )
     advec.discretize()
     advec_py.discretize()
-    advec.setUp()
-    advec_py.setUp()
+    advec.setup()
+    advec_py.setup()
     topo = advec.discreteFields[velo].topology
     scal_d = scal.discreteFields[topo]
     scal_ref_d = scal_ref.discreteFields[topo]
 
-    scal_d.data[0][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER)
+    scal_d.data[0][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape))
     scal_ref_d.data[0][...] = scal_d.data[0]
 
     advec.apply(Simulation(tinit=0., tend=0.1, nbIter=1))
@@ -208,21 +179,15 @@ def test_nullVelocity_m8():
 
 def test_nullVelocity_vec_m8():
     """Basic test with random velocity and vector field. Using M4prime"""
-    box = Box(3, length=[1., 1., 1.], origin=[0., 0., 0.])
+    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
     scal = Field(domain=box, name='Scalar', isVector=True)
     scal_ref = Field(domain=box, name='Scalar_ref', isVector=True)
     velo = Field(domain=box, name='Velocity',
                  formula=lambda x, y, z, t: (0., 0., 0.), isVector=True,
                  doVectorize=True)
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
-                      method={Scales: 'p_M8'}
-                      )
+    advec = Advection(velo, scal, discretization=d3d, method={Scales: 'p_M8'})
-    advec_py = Advection(velo, scal_ref,
-                         resolutions={velo: [17, 17, 17],
-                                      scal_ref: [17, 17, 17]},
-                         method={TimeIntegrator: RK2,
+    advec_py = Advection(velo, scal_ref, discretization=d3d,
+                         method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: M8Prime,
                                  Splitting: 'o2_FullHalf',
@@ -230,14 +195,15 @@ def test_nullVelocity_vec_m8():
                          )
     advec.discretize()
     advec_py.discretize()
-    advec.setUp()
-    advec_py.setUp()
-    assert (velo.norm() == 0).all()
+    advec.setup()
+    advec_py.setup()
     scal_d = scal.discreteFields.values()[0]
     scal_ref_d = scal_ref.discreteFields.values()[0]
+    topo = scal_d.topology
+    assert (velo.norm(topo) == 0).all()
     for i in xrange(box.dimension):
         scal_d.data[i][...] = \
-            npw.realarray(np.random.random(scal_d.data[i].shape))
+            npw.asrealarray(np.random.random(scal_d.data[i].shape))
         scal_ref_d.data[i][...] = scal_d.data[i][...]
 
     advec.apply(Simulation(tinit=0., tend=0.075, nbIter=1))
@@ -249,19 +215,13 @@ def test_nullVelocity_vec_m8():
 
 def _randomVelocity_m4():
     """Basic test with random velocity. Using M4prime"""
-    box = Box(3, length=[1., 1., 1.], origin=[0., 0., 0.])
+    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
     scal = Field(domain=box, name='Scalar')
     scal_ref = Field(domain=box, name='Scalar_ref')
     velo = Field(domain=box, name='Velocity', isVector=True)
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
-                      method={Scales: 'p_M4'}
-                      )
+    advec = Advection(velo, scal, discretization=d3d, method={Scales: 'p_M4'})
-    advec_py = Advection(velo, scal_ref,
-                         resolutions={velo: [17, 17, 17],
-                                      scal_ref: [17, 17, 17]},
-                         method={TimeIntegrator: RK2,
+    advec_py = Advection(velo, scal_ref, discretization=d3d,
+                         method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L2_1,
                                  Splitting: 'o2_FullHalf',
@@ -269,26 +229,22 @@ def _randomVelocity_m4():
                          )
     advec.discretize()
     advec_py.discretize()
-    advec.setUp()
-    advec_py.setUp()
+    advec.setup()
+    advec_py.setup()
 
     scal_d = scal.discreteFields.values()[0]
     scal_ref_d = scal_ref.discreteFields.values()[0]
     velo_d = velo.discreteFields.values()[0]
 
-    scal_d.data[0][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER)
+    scal_d.data[0][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape))
     scal_ref_d.data[0][...] = scal_d.data[0][...]
-    velo_d.data[0][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[0])
-    velo_d.data[1][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[1])
-    velo_d.data[2][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[1])
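+    # Scale the random velocity by 1/(2*resolution) so that displacements
+    # stay well below one grid step (a guess at the original intent: keep
+    # the remeshing kernels inside their stability range).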
+    velo_d.data[0][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape)) / (2. * scal_d.resolution[0])
+    velo_d.data[1][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape)) / (2. * scal_d.resolution[1])
+    velo_d.data[2][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape)) / (2. * scal_d.resolution[2])
 
     advec.apply(Simulation(tinit=0., tend=0.1, nbIter=1))
     advec_py.apply(Simulation(tinit=0., tend=0.1, nbIter=1))
@@ -298,19 +254,13 @@ def _randomVelocity_m4():
 
 def _randomVelocity_vec_m4():
     """Basic test with random velocity vector Field. Using M4prime"""
-    box = Box(3, length=[1., 1., 1.], origin=[0., 0., 0.])
+    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
     scal = Field(domain=box, name='Scalar', isVector=True)
     scal_ref = Field(domain=box, name='Scalar_ref', isVector=True)
     velo = Field(domain=box, name='Velocity', isVector=True)
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
-                      method={Scales: 'p_M4'}
-                      )
+    advec = Advection(velo, scal, discretization=d3d, method={Scales: 'p_M4'})
-    advec_py = Advection(velo, scal_ref,
-                         resolutions={velo: [17, 17, 17],
-                                      scal_ref: [17, 17, 17]},
-                         method={TimeIntegrator: RK2,
+    advec_py = Advection(velo, scal_ref, discretization=d3d,
+                         method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L2_1,
                                  Splitting: 'o2_FullHalf',
@@ -318,34 +268,28 @@ def _randomVelocity_vec_m4():
                          )
     advec.discretize()
     advec_py.discretize()
-    advec.setUp()
-    advec_py.setUp()
+    advec.setup()
+    advec_py.setup()
 
     scal_d = scal.discreteFields.values()[0]
     scal_ref_d = scal_ref.discreteFields.values()[0]
     velo_d = velo.discreteFields.values()[0]
 
-    scal_d.data[0][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER)
-    scal_d.data[1][...] = np.asarray(
-        np.random.random(scal_d.data[1].shape),
-        dtype=PARMES_REAL, order=ORDER)
-    scal_d.data[2][...] = np.asarray(
-        np.random.random(scal_d.data[2].shape),
-        dtype=PARMES_REAL, order=ORDER)
+    scal_d.data[0][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape))
+    scal_d.data[1][...] = npw.asrealarray(
+        np.random.random(scal_d.data[1].shape))
+    scal_d.data[2][...] = npw.asrealarray(
+        np.random.random(scal_d.data[2].shape))
     scal_ref_d.data[0][...] = scal_d.data[0][...]
     scal_ref_d.data[1][...] = scal_d.data[1][...]
     scal_ref_d.data[2][...] = scal_d.data[2][...]
-    velo_d.data[0][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[0])
-    velo_d.data[1][...] = np.asarray(
-        np.random.random(scal_d.data[1].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[1])
-    velo_d.data[2][...] = np.asarray(
-        np.random.random(scal_d.data[2].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[1])
+    velo_d.data[0][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape)) / (2. * scal_d.resolution[0])
+    velo_d.data[1][...] = npw.asrealarray(
+        np.random.random(scal_d.data[1].shape)) / (2. * scal_d.resolution[1])
+    velo_d.data[2][...] = npw.asrealarray(
+        np.random.random(scal_d.data[2].shape)) / (2. * scal_d.resolution[2])
 
     advec.apply(Simulation(tinit=0., tend=0.1, nbIter=1))
     advec_py.apply(Simulation(tinit=0., tend=0.1, nbIter=1))
@@ -357,19 +301,13 @@ def _randomVelocity_vec_m4():
 
 def test_randomVelocity_m6():
     """Basic test with random velocity. Using M6prime"""
-    box = Box(3, length=[1., 1., 1.], origin=[0., 0., 0.])
+    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
     scal = Field(domain=box, name='Scalar')
     scal_ref = Field(domain=box, name='Scalar_ref')
     velo = Field(domain=box, name='Velocity', isVector=True)
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
-                      method={Scales: 'p_M6'}
-                      )
+    advec = Advection(velo, scal, discretization=d3d, method={Scales: 'p_M6'})
-    advec_py = Advection(velo, scal_ref,
-                         resolutions={velo: [17, 17, 17],
-                                      scal_ref: [17, 17, 17]},
-                         method={TimeIntegrator: RK2,
+    advec_py = Advection(velo, scal_ref, discretization=d3d,
+                         method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L4_2,
                                  Splitting: 'o2_FullHalf',
@@ -377,26 +315,22 @@ def test_randomVelocity_m6():
                          )
     advec.discretize()
     advec_py.discretize()
-    advec.setUp()
-    advec_py.setUp()
+    advec.setup()
+    advec_py.setup()
 
     scal_d = scal.discreteFields.values()[0]
     scal_ref_d = scal_ref.discreteFields.values()[0]
     velo_d = velo.discreteFields.values()[0]
 
-    scal_d.data[0][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER)
+    scal_d.data[0][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape))
     scal_ref_d.data[0][...] = scal_d.data[0][...]
-    velo_d.data[0][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[0])
-    velo_d.data[1][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[1])
-    velo_d.data[2][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[1])
+    velo_d.data[0][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape)) / (2. * scal_d.resolution[0])
+    velo_d.data[1][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape)) / (2. * scal_d.resolution[1])
+    velo_d.data[2][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape)) / (2. * scal_d.resolution[2])
 
     advec.apply(Simulation(tinit=0., tend=0.1, nbIter=1))
     advec_py.apply(Simulation(tinit=0., tend=0.1, nbIter=1))
@@ -406,19 +340,13 @@ def test_randomVelocity_m6():
 
 def test_randomVelocity_vec_m6():
     """Basic test with random velocity vector Field. Using M6prime"""
-    box = Box(3, length=[1., 1., 1.], origin=[0., 0., 0.])
+    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
     scal = Field(domain=box, name='Scalar', isVector=True)
     scal_ref = Field(domain=box, name='Scalar_ref', isVector=True)
     velo = Field(domain=box, name='Velocity', isVector=True)
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
-                      method={Scales: 'p_M6'}
-                      )
+    advec = Advection(velo, scal, discretization=d3d, method={Scales: 'p_M6'})
-    advec_py = Advection(velo, scal_ref,
-                         resolutions={velo: [17, 17, 17],
-                                      scal_ref: [17, 17, 17]},
-                         method={TimeIntegrator: RK2,
+    advec_py = Advection(velo, scal_ref, discretization=d3d,
+                         method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: L4_2,
                                  Splitting: 'o2_FullHalf',
@@ -426,34 +354,28 @@ def test_randomVelocity_vec_m6():
                          )
     advec.discretize()
     advec_py.discretize()
-    advec.setUp()
-    advec_py.setUp()
+    advec.setup()
+    advec_py.setup()
 
     scal_d = scal.discreteFields.values()[0]
     scal_ref_d = scal_ref.discreteFields.values()[0]
     velo_d = velo.discreteFields.values()[0]
 
-    scal_d.data[0][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER)
-    scal_d.data[1][...] = np.asarray(
-        np.random.random(scal_d.data[1].shape),
-        dtype=PARMES_REAL, order=ORDER)
-    scal_d.data[2][...] = np.asarray(
-        np.random.random(scal_d.data[2].shape),
-        dtype=PARMES_REAL, order=ORDER)
+    scal_d.data[0][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape))
+    scal_d.data[1][...] = npw.asrealarray(
+        np.random.random(scal_d.data[1].shape))
+    scal_d.data[2][...] = npw.asrealarray(
+        np.random.random(scal_d.data[2].shape))
     scal_ref_d.data[0][...] = scal_d.data[0][...]
     scal_ref_d.data[1][...] = scal_d.data[1][...]
     scal_ref_d.data[2][...] = scal_d.data[2][...]
-    velo_d.data[0][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[0])
-    velo_d.data[1][...] = np.asarray(
-        np.random.random(scal_d.data[1].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[1])
-    velo_d.data[2][...] = np.asarray(
-        np.random.random(scal_d.data[2].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[1])
+    velo_d.data[0][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape)) / (2. * scal_d.resolution[0])
+    velo_d.data[1][...] = npw.asrealarray(
+        np.random.random(scal_d.data[1].shape)) / (2. * scal_d.resolution[1])
+    velo_d.data[2][...] = npw.asrealarray(
+        np.random.random(scal_d.data[2].shape)) / (2. * scal_d.resolution[2])
 
     advec.apply(Simulation(tinit=0., tend=0.1, nbIter=1))
     advec_py.apply(Simulation(tinit=0., tend=0.1, nbIter=1))
@@ -465,18 +387,13 @@ def test_randomVelocity_vec_m6():
 
 def test_randomVelocity_m8():
     """Basic test with random velocity. Using M8prime"""
-    box = Box(3, length=[1., 1., 1.], origin=[0., 0., 0.])
+    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
     scal = Field(domain=box, name='Scalar')
     scal_ref = Field(domain=box, name='Scalar_ref')
     velo = Field(domain=box, name='Velocity', isVector=True)
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
-                      method={Scales: 'p_M8'}
-                      )
+    advec = Advection(velo, scal, discretization=d3d, method={Scales: 'p_M8'})
-    advec_py = Advection(velo, scal_ref,
-                         resolutions={velo: [17, 17, 17],
-                                      scal_ref: [17, 17, 17]},
+    advec_py = Advection(velo, scal_ref, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: M8Prime,
@@ -485,26 +402,22 @@ def test_randomVelocity_m8():
                          )
     advec.discretize()
     advec_py.discretize()
-    advec.setUp()
-    advec_py.setUp()
+    advec.setup()
+    advec_py.setup()
 
     scal_d = scal.discreteFields.values()[0]
     scal_ref_d = scal_ref.discreteFields.values()[0]
     velo_d = velo.discreteFields.values()[0]
 
-    scal_d.data[0][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER)
+    scal_d.data[0][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape))
     scal_ref_d.data[0][...] = scal_d.data[0][...]
-    velo_d.data[0][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[0])
-    velo_d.data[1][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[1])
-    velo_d.data[2][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[1])
+    velo_d.data[0][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape)) / (2. * scal_d.resolution[0])
+    velo_d.data[1][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape)) / (2. * scal_d.resolution[1])
+    velo_d.data[2][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape)) / (2. * scal_d.resolution[2])
 
     advec.apply(Simulation(tinit=0., tend=0.1, nbIter=1))
     advec_py.apply(Simulation(tinit=0., tend=0.1, nbIter=1))
@@ -514,18 +427,13 @@ def test_randomVelocity_m8():
 
 def test_randomVelocity_vec_m8():
     """Basic test with random velocity vector Field. Using M8prime"""
-    box = Box(3, length=[1., 1., 1.], origin=[0., 0., 0.])
+    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
     scal = Field(domain=box, name='Scalar', isVector=True)
     scal_ref = Field(domain=box, name='Scalar_ref', isVector=True)
     velo = Field(domain=box, name='Velocity', isVector=True)
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
-                      method={Scales: 'p_M8'}
-                      )
+    advec = Advection(velo, scal, discretization=d3d, method={Scales: 'p_M8'})
-    advec_py = Advection(velo, scal_ref,
-                         resolutions={velo: [17, 17, 17],
-                                      scal_ref: [17, 17, 17]},
+    advec_py = Advection(velo, scal_ref, discretization=d3d,
                          method={TimeIntegrator: RK2,
                                  Interpolation: Linear,
                                  Remesh: M8Prime,
@@ -534,34 +442,28 @@ def test_randomVelocity_vec_m8():
                          )
     advec.discretize()
     advec_py.discretize()
-    advec.setUp()
-    advec_py.setUp()
+    advec.setup()
+    advec_py.setup()
 
     scal_d = scal.discreteFields.values()[0]
     scal_ref_d = scal_ref.discreteFields.values()[0]
     velo_d = velo.discreteFields.values()[0]
 
-    scal_d.data[0][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER)
-    scal_d.data[1][...] = np.asarray(
-        np.random.random(scal_d.data[1].shape),
-        dtype=PARMES_REAL, order=ORDER)
-    scal_d.data[2][...] = np.asarray(
-        np.random.random(scal_d.data[2].shape),
-        dtype=PARMES_REAL, order=ORDER)
+    scal_d.data[0][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape))
+    scal_d.data[1][...] = npw.asrealarray(
+        np.random.random(scal_d.data[1].shape))
+    scal_d.data[2][...] = npw.asrealarray(
+        np.random.random(scal_d.data[2].shape))
     scal_ref_d.data[0][...] = scal_d.data[0][...]
     scal_ref_d.data[1][...] = scal_d.data[1][...]
     scal_ref_d.data[2][...] = scal_d.data[2][...]
-    velo_d.data[0][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[0])
-    velo_d.data[1][...] = np.asarray(
-        np.random.random(scal_d.data[1].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[1])
-    velo_d.data[2][...] = np.asarray(
-        np.random.random(scal_d.data[2].shape),
-        dtype=PARMES_REAL, order=ORDER) / (2. * scal_d.resolution[1])
+    velo_d.data[0][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape)) / (2. * scal_d.resolution[0])
+    velo_d.data[1][...] = npw.asrealarray(
+        np.random.random(scal_d.data[1].shape)) / (2. * scal_d.resolution[1])
+    velo_d.data[2][...] = npw.asrealarray(
+        np.random.random(scal_d.data[2].shape)) / (2. * scal_d.resolution[2])
 
     advec.apply(Simulation(tinit=0., tend=0.01, nbIter=1))
     advec_py.apply(Simulation(tinit=0., tend=0.01, nbIter=1))
diff --git a/HySoP/hysop/operator/tests/test_analytic.py b/HySoP/hysop/operator/tests/test_analytic.py
index d3ad803097247ad14ae102fb12f005a9ed925431..bb627bcfe662fb4e15fae97ac625abb6e449a10d 100644
--- a/HySoP/hysop/operator/tests/test_analytic.py
+++ b/HySoP/hysop/operator/tests/test_analytic.py
@@ -1,360 +1,34 @@
 """
 @file parmepy.operator.tests.test_analytic
 Test initialization of fields with analytic formula
----> either field.initialize or analytic operator description.
 """
-import numpy as np
+from numpy import allclose
 from parmepy.domain.box import Box
 from parmepy.fields.continuous import Field
 from parmepy.operator.analytic import Analytic
 from parmepy.problem.simulation import Simulation
-from parmepy.mpi.topology import Cartesian
-
-
-def func_scal_1(res, x, y, z, t):
-    res[0][...] = x - 0.1 * y + 10. * z * z * t
-    return res
-
-
-def func_scal_2(x, y, z, t):
-    f = x - 0.1 * y + 10. * z * z * t
-    return f
-
-
-def func_vec_1(res, x, y, z, t):
-    res[0][...] = x
-    res[1][...] = 0.1 * y
-    res[2][...] = 10. * z * z
-    return res
-
-
-def func_vec_2(x, y, z, t):
-    f_x = x
-    f_y = 0.1 * y
-    f_z = 10. * z * z
-    return f_x, f_y, f_z
-
-
-def func_vec_3(res, x, y, z, t, theta):
-    res[0][...] = x + theta
-    res[1][...] = 0.1 * y
-    res[2][...] = 10. * z * z
-    return res
-
-
-def func_vec_4(x, y, z, t, theta):
-    f_x = x + theta
-    f_y = 0.1 * y
-    f_z = 10. * z * z
-    return f_x, f_y, f_z
-
-
-def func_vec_5(res, x, y, z, t, theta):
-    res[0][...] = x + theta
-    res[1][...] = 0.1 * y
-    res[2][...] = 10. * z * z
-    res[3][...] = theta * z
-    return res
-
-
-def func_vec_6(res, x, y, t, theta):
-    res[0][...] = x + theta
-    res[1][...] = 0.1 * y
-    res[2][...] = 10. * y
-    res[3][...] = theta * t
-    return res
-
-
-res3D = [33, 33, 33]
-res2D = [33, 33]
+from parmepy.tools.parameters import Discretization
+from parmepy.fields.tests.func_for_tests import func_scal_1, func_scal_2, \
+    func_vec_1, func_vec_2, func_vec_3, func_vec_4, func_vec_5, func_vec_6
+d3D = Discretization([33, 33, 33])
+d2D = Discretization([33, 33])
 L2D = [1., 1.]
 origin2D = [0., 0.]
 nbc = 4
 simu = Simulation(tinit=0., tend=0.1, nbIter=1)
 
-## I - Tests with direct calls of field.initialize
-
-
-# Non-Vectorized formula for a scalar
-def test_analytical_field_1():
-    box = Box()
-    topo = Cartesian(box, 3, res3D)
-    coords = topo.mesh.coords
-    caf = Field(box, formula=func_scal_1)
-    ref = Field(box)
-    refd = ref.discretize(topo)
-    cafd = caf.discretize(topo)
-    refd = ref.discretize(topo)
-    ids = id(cafd.data[0])
-    caf.initialize()
-    refd.data = func_scal_1(refd.data, *(coords + (0.,)))
-    assert np.allclose(cafd[0], refd.data[0])
-    assert id(cafd.data[0]) == ids
-    time = 3.0
-    caf.initialize(currentTime=time)
-    refd.data = func_scal_1(refd.data, *(coords + (time,)))
-    assert np.allclose(cafd[0], refd.data[0])
-    assert id(cafd.data[0]) == ids
-
-
-# Vectorized formula
-def test_analytical_field_2():
-    box = Box()
-    topo = Cartesian(box, 3, res3D)
-    coords = topo.mesh.coords
-    caf = Field(box, formula=func_scal_2, doVectorize=True)
-    ref = Field(box)
-    cafd = caf.discretize(topo)
-    ids = id(cafd.data[0])
-    refd = ref.discretize(topo)
-    caf.initialize()
-    refd.data = func_scal_1(refd.data, *(coords + (0.,)))
-    assert np.allclose(cafd[0], refd.data[0])
-    assert id(cafd.data[0]) == ids
-    time = 3.0
-    caf.initialize(currentTime=time)
-    refd.data = func_scal_1(refd.data, *(coords + (time,)))
-    assert np.allclose(cafd[0], refd.data[0])
-    assert id(cafd.data[0]) == ids
-
-
-# Non-Vectorized formula for a vector
-def test_analytical_field_3():
-    box = Box()
-    topo = Cartesian(box, 3, res3D)
-    coords = topo.mesh.coords
-    caf = Field(box, formula=func_vec_1, isVector=True)
-    ref = Field(box, isVector=True)
-    refd = ref.discretize(topo)
-    cafd = caf.discretize(topo)
-    refd = ref.discretize(topo)
-    ids = np.zeros((3))
-    for i in xrange(3):
-        ids[i] = id(cafd.data[i])
-    caf.initialize()
-    refd.data = func_vec_1(refd.data, *(coords + (0.,)))
-    for i in xrange(caf.nbComponents):
-        assert np.allclose(cafd[i], refd.data[i])
-        assert id(cafd.data[i]) == ids[i]
-    time = 3.0
-    caf.initialize(currentTime=time)
-    refd.data = func_vec_1(refd.data, *(coords + (time,)))
-    for i in xrange(caf.nbComponents):
-        assert np.allclose(cafd[i], refd.data[i])
-        assert id(cafd.data[i]) == ids[i]
-
-
-# Vectorized formula for a vector
-def test_analytical_field_4():
-    box = Box()
-    topo = Cartesian(box, 3, res3D)
-    coords = topo.mesh.coords
-    caf = Field(box, formula=func_vec_2, isVector=True, doVectorize=True)
-    ref = Field(box, isVector=True)
-    refd = ref.discretize(topo)
-    cafd = caf.discretize(topo)
-    refd = ref.discretize(topo)
-    ids = np.zeros((3))
-    for i in xrange(3):
-        ids[i] = id(cafd.data[i])
-    caf.initialize()
-    refd.data = func_vec_1(refd.data, *(coords + (0.,)))
-    for i in xrange(caf.nbComponents):
-        assert np.allclose(cafd[i], refd.data[i])
-        assert id(cafd.data[i]) == ids[i]
-    time = 3.0
-    caf.initialize(currentTime=time)
-    refd.data = func_vec_1(refd.data, *(coords + (time,)))
-    for i in xrange(caf.nbComponents):
-        assert np.allclose(cafd[i], refd.data[i])
-        assert id(cafd.data[i]) == ids[i]
-
-
-# Non-Vectorized formula for a vector, with extra-arguments
-def test_analytical_field_5():
-    box = Box()
-    topo = Cartesian(box, 3, res3D)
-    coords = topo.mesh.coords
-    caf = Field(box, formula=func_vec_3, isVector=True)
-    theta = 0.3
-    caf.setExtraParameters(theta)
-    ref = Field(box, isVector=True)
-    refd = ref.discretize(topo)
-    cafd = caf.discretize(topo)
-    refd = ref.discretize(topo)
-    ids = np.zeros((3))
-    for i in xrange(3):
-        ids[i] = id(cafd.data[i])
-    caf.initialize()
-    refd.data = func_vec_3(refd.data, *(coords + (0., theta)))
-    for i in xrange(caf.nbComponents):
-        assert np.allclose(cafd[i], refd.data[i])
-        assert id(cafd.data[i]) == ids[i]
-    time = 3.0
-    caf.initialize(currentTime=time)
-    refd.data = func_vec_3(refd.data, *(coords + (time, theta)))
-    for i in xrange(caf.nbComponents):
-        assert np.allclose(cafd[i], refd.data[i])
-        assert id(cafd.data[i]) == ids[i]
-
-
-# Vectorized formula for a vector, with extra-arguments
-def test_analytical_field_6():
-    box = Box()
-    topo = Cartesian(box, 3, res3D)
-    coords = topo.mesh.coords
-    caf = Field(box, formula=func_vec_4, isVector=True, doVectorize=True)
-    theta = 0.3
-    caf.setExtraParameters(theta)
-    ref = Field(box, isVector=True)
-    refd = ref.discretize(topo)
-    cafd = caf.discretize(topo)
-    refd = ref.discretize(topo)
-    ids = np.zeros((3))
-    for i in xrange(3):
-        ids[i] = id(cafd.data[i])
-    caf.initialize()
-    refd.data = func_vec_3(refd.data, *(coords + (0., theta)))
-    for i in xrange(caf.nbComponents):
-        assert np.allclose(cafd[i], refd.data[i])
-        assert id(cafd.data[i]) == ids[i]
-    time = 3.0
-    caf.initialize(currentTime=time)
-    refd.data = func_vec_3(refd.data, *(coords + (time, theta)))
-    for i in xrange(caf.nbComponents):
-        assert np.allclose(cafd[i], refd.data[i])
-        assert id(cafd.data[i]) == ids[i]
-
-
-# Non-Vectorized formula for a field with nbComponents
-# different from domain dim and  with extra-arguments
-def test_analytical_field_7():
-    box = Box()
-    topo = Cartesian(box, 3, res3D)
-    coords = topo.mesh.coords
-    caf = Field(box, formula=func_vec_5, nbComponents=nbc)
-    theta = 0.3
-    caf.setExtraParameters(theta)
-    ref = Field(box, nbComponents=nbc)
-    refd = ref.discretize(topo)
-    cafd = caf.discretize(topo)
-    refd = ref.discretize(topo)
-    ids = np.zeros((nbc))
-    for i in xrange(nbc):
-        ids[i] = id(cafd.data[i])
-
-    caf.initialize()
-    refd.data = func_vec_5(refd.data, *(coords + (0., theta)))
-    for i in xrange(caf.nbComponents):
-        assert np.allclose(cafd[i], refd.data[i])
-        assert id(cafd.data[i]) == ids[i]
-    time = 3.0
-    caf.initialize(currentTime=time)
-    refd.data = func_vec_5(refd.data, *(coords + (time, theta)))
-    for i in xrange(caf.nbComponents):
-        assert np.allclose(cafd[i], refd.data[i])
-        assert id(cafd.data[i]) == ids[i]
-
-
-# Non-Vectorized formula for a 2D field with nbComponents
-# different from domain dim and  with extra-arguments
-def test_analytical_field_8():
-    box = Box(dimension=2, length=L2D, origin=origin2D)
-    topo = Cartesian(box, 2, res2D)
-    coords = topo.mesh.coords
-    caf = Field(box, formula=func_vec_6, nbComponents=nbc)
-    theta = 0.3
-    caf.setExtraParameters(theta)
-    ref = Field(box, nbComponents=nbc)
-    refd = ref.discretize(topo)
-    cafd = caf.discretize(topo)
-    refd = ref.discretize(topo)
-    ids = np.zeros((nbc))
-    for i in xrange(nbc):
-        ids[i] = id(cafd.data[i])
-    caf.initialize()
-    refd.data = func_vec_6(refd.data, *(coords + (0., theta)))
-    for i in xrange(caf.nbComponents):
-        assert np.allclose(cafd[i], refd.data[i])
-        assert id(cafd.data[i]) == ids[i]
-    time = 3.0
-    caf.initialize(currentTime=time)
-    refd.data = func_vec_6(refd.data, *(coords + (time, theta)))
-    for i in xrange(caf.nbComponents):
-        assert np.allclose(cafd[i], refd.data[i])
-        assert id(cafd.data[i]) == ids[i]
-
-# Non-Vectorized formula for a vector, initialization on several
-# topologies.
-def test_analytical_field_9():
-    box = Box()
-    topo = Cartesian(box, 3, res3D)
-    res2 = [65, 33, 65]
-    topo2 = Cartesian(box, 2, res2, ghosts=[1, 1, 1])
-    coords = topo.mesh.coords
-    coords2 = topo2.mesh.coords
-    caf = Field(box, formula=func_vec_1, isVector=True)
-    ref = Field(box, isVector=True)
-    refd = ref.discretize(topo)
-    cafd = caf.discretize(topo)
-    cafd2 = caf.discretize(topo2)
-    refd2 = ref.discretize(topo2)
-    ids = np.zeros((3))
-    for i in xrange(3):
-        ids[i] = id(cafd2.data[i])
-        # init on topo2
-    caf.initialize(topo=topo2)
-    refd2.data = func_vec_1(refd2.data, *(coords2 + (0.,)))
-    refd.data = func_vec_1(refd.data, *(coords + (0.,)))
-    for i in xrange(caf.nbComponents):
-        assert np.allclose(cafd2[i], refd2.data[i])
-        assert id(cafd2.data[i]) == ids[i]
-        assert not np.allclose(cafd[i], refd.data[i])
-    caf.initialize(topo=topo)
-    for i in xrange(caf.nbComponents):
-        assert np.allclose(cafd[i], refd.data[i])
-
-# Non-Vectorized formula for a vector, initialization on several
-# topologies.
-def test_analytical_field_10():
-    box = Box()
-    topo = Cartesian(box, 3, res3D)
-    res2 = [65, 33, 65]
-    topo2 = Cartesian(box, 2, res2, ghosts=[1, 1, 1])
-    coords = topo.mesh.coords
-    coords2 = topo2.mesh.coords
-    caf = Field(box, formula=func_vec_1, isVector=True)
-    ref = Field(box, isVector=True)
-    refd = ref.discretize(topo)
-    cafd = caf.discretize(topo)
-    cafd2 = caf.discretize(topo2)
-    refd2 = ref.discretize(topo2)
-    ids = np.zeros((3))
-    for i in xrange(3):
-        ids[i] = id(cafd2.data[i])
-    # init on all topos
-    caf.initialize()
-    refd2.data = func_vec_1(refd2.data, *(coords2 + (0.,)))
-    refd.data = func_vec_1(refd.data, *(coords + (0.,)))
-    for i in xrange(caf.nbComponents):
-        assert np.allclose(cafd2[i], refd2.data[i])
-        assert np.allclose(cafd[i], refd.data[i])
-        assert id(cafd2.data[i]) == ids[i]
-
-
-## II - Same initializations as I but with call to analytic operator.
 
 # Non-Vectorized and vectorized formulas for a scalar
 def test_analytical_op_1():
     box = Box()
     caf = Field(box, formula=func_scal_1)
     caf2 = Field(box, formula=func_scal_2, doVectorize=True)
-    op = Analytic(variables=caf, resolutions={caf: res3D})
-    op2 = Analytic(variables=caf2, resolutions={caf2: res3D})
+    op = Analytic(variables={caf: d3D})
+    op2 = Analytic(variables={caf2: d3D})
     op.discretize()
     op2.discretize()
-    op.setUp()
-    op2.setUp()
+    op.setup()
+    op2.setup()
     topo = op.discreteFields[caf].topology
     coords = topo.mesh.coords
     ref = Field(box)
@@ -366,9 +40,9 @@ def test_analytical_op_1():
     op.apply(simu)
     op2.apply(simu)
     refd.data = func_scal_1(refd.data, *(coords + (simu.time,)))
-    assert np.allclose(cafd[0], refd.data[0])
+    assert allclose(cafd[0], refd.data[0])
     assert id(cafd.data[0]) == ids
-    assert np.allclose(cafd2[0], refd.data[0])
+    assert allclose(cafd2[0], refd.data[0])
     assert id(cafd2.data[0]) == ids2
 
 
@@ -377,20 +51,20 @@ def test_analytical_op_3():
     box = Box()
     caf = Field(box, formula=func_vec_1, isVector=True)
     caf2 = Field(box, formula=func_vec_2, doVectorize=True, isVector=True)
-    op = Analytic(variables=caf, resolutions={caf: res3D})
-    op2 = Analytic(variables=caf2, resolutions={caf2: res3D})
+    op = Analytic(variables={caf: d3D})
+    op2 = Analytic(variables={caf2: d3D})
     op.discretize()
     op2.discretize()
-    op.setUp()
-    op2.setUp()
+    op.setup()
+    op2.setup()
     topo = op.discreteFields[caf].topology
     coords = topo.mesh.coords
     ref = Field(box, isVector=True)
     refd = ref.discretize(topo)
     cafd = caf.discreteFields[topo]
     cafd2 = caf2.discreteFields[topo]
-    ids = np.zeros((3))
-    ids2 = np.zeros((3))
+    ids = [0, ] * 3
+    ids2 = [0, ] * 3
     for i in xrange(3):
         ids[i] = id(cafd.data[i])
         ids2[i] = id(cafd2.data[i])
@@ -398,9 +72,9 @@ def test_analytical_op_3():
     op2.apply(simu)
     refd.data = func_vec_1(refd.data, *(coords + (simu.time,)))
     for i in xrange(caf.nbComponents):
-        assert np.allclose(cafd[i], refd.data[i])
+        assert allclose(cafd[i], refd.data[i])
         assert id(cafd.data[i]) == ids[i]
-        assert np.allclose(cafd2[i], refd.data[i])
+        assert allclose(cafd2[i], refd.data[i])
         assert id(cafd2.data[i]) == ids2[i]
 
 
@@ -409,20 +83,20 @@ def test_analytical_op_4():
     box = Box()
     caf = Field(box, formula=func_vec_3, isVector=True)
     caf2 = Field(box, formula=func_vec_4, doVectorize=True, isVector=True)
-    op = Analytic(variables=caf, resolutions={caf: res3D})
-    op2 = Analytic(variables=caf2, resolutions={caf2: res3D})
+    op = Analytic(variables={caf: d3D})
+    op2 = Analytic(variables={caf2: d3D})
     op.discretize()
     op2.discretize()
-    op.setUp()
-    op2.setUp()
+    op.setup()
+    op2.setup()
     topo = op.discreteFields[caf].topology
     coords = topo.mesh.coords
     ref = Field(box, isVector=True)
     refd = ref.discretize(topo)
     cafd = caf.discreteFields[topo]
     cafd2 = caf2.discreteFields[topo]
-    ids = np.zeros((3))
-    ids2 = np.zeros((3))
+    ids = [0, ] * 3
+    ids2 = [0, ] * 3
     for i in xrange(3):
         ids[i] = id(cafd.data[i])
         ids2[i] = id(cafd2.data[i])
@@ -433,9 +107,9 @@ def test_analytical_op_4():
     op2.apply(simu)
     refd.data = func_vec_3(refd.data, *(coords + (simu.time, theta)))
     for i in xrange(caf.nbComponents):
-        assert np.allclose(cafd[i], refd.data[i])
+        assert allclose(cafd[i], refd.data[i])
         assert id(cafd.data[i]) == ids[i]
-        assert np.allclose(cafd2[i], refd.data[i])
+        assert allclose(cafd2[i], refd.data[i])
         assert id(cafd2.data[i]) == ids2[i]
 
 
@@ -443,15 +117,15 @@ def test_analytical_op_4():
 def test_analytical_op_5():
     box = Box()
     caf = Field(box, formula=func_vec_5, nbComponents=nbc)
-    op = Analytic(variables=caf, resolutions={caf: res3D})
+    op = Analytic(variables={caf: d3D})
     op.discretize()
-    op.setUp()
+    op.setup()
     topo = op.discreteFields[caf].topology
     coords = topo.mesh.coords
     ref = Field(box, nbComponents=nbc)
     refd = ref.discretize(topo)
     cafd = caf.discreteFields[topo]
-    ids = np.zeros((nbc))
+    ids = [0, ] * nbc
     for i in xrange(nbc):
         ids[i] = id(cafd.data[i])
     theta = 3.
@@ -459,7 +133,7 @@ def test_analytical_op_5():
     op.apply(simu)
     refd.data = func_vec_5(refd.data, *(coords + (simu.time, theta)))
     for i in xrange(caf.nbComponents):
-        assert np.allclose(cafd[i], refd.data[i])
+        assert allclose(cafd[i], refd.data[i])
         assert id(cafd.data[i]) == ids[i]
 
 
@@ -467,15 +141,15 @@ def test_analytical_op_5():
 def test_analytical_op_6():
     box = Box(dimension=2, length=L2D, origin=origin2D)
     caf = Field(box, formula=func_vec_6, nbComponents=nbc)
-    op = Analytic(variables=caf, resolutions={caf: res2D})
+    op = Analytic(variables={caf: d2D})
     op.discretize()
-    op.setUp()
+    op.setup()
     topo = op.discreteFields[caf].topology
     coords = topo.mesh.coords
     ref = Field(box, nbComponents=nbc)
     refd = ref.discretize(topo)
     cafd = caf.discreteFields[topo]
-    ids = np.zeros((nbc))
+    ids = [0, ] * nbc
     for i in xrange(nbc):
         ids[i] = id(cafd.data[i])
     theta = 3.
@@ -483,18 +157,11 @@ def test_analytical_op_6():
     op.apply(simu)
     refd.data = func_vec_6(refd.data, *(coords + (simu.time, theta)))
     for i in xrange(caf.nbComponents):
-        assert np.allclose(cafd[i], refd.data[i])
+        assert allclose(cafd[i], refd.data[i])
         assert id(cafd.data[i]) == ids[i]
 
+
 if __name__ == "__main__":
-    test_analytical_field_1()
-    test_analytical_field_2()
-    test_analytical_field_3()
-    test_analytical_field_4()
-    test_analytical_field_5()
-    test_analytical_field_6()
-    test_analytical_field_7()
-    test_analytical_field_8()
     test_analytical_op_1()
     test_analytical_op_3()
     test_analytical_op_4()
diff --git a/HySoP/hysop/operator/tests/test_density.py b/HySoP/hysop/operator/tests/test_density.py
new file mode 100644
index 0000000000000000000000000000000000000000..0fbba41cc34dfae9cd4af2e2b6933f0e37d5b6af
--- /dev/null
+++ b/HySoP/hysop/operator/tests/test_density.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+import parmepy as pp
+from parmepy.operator.density import DensityVisco
+from parmepy.problem.simulation import Simulation
+from parmepy.tools.parameters import Discretization
+from parmepy import Field
+
+d3d = Discretization([129, 129, 129])
+
+
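+# Analytic formulas fill the pre-allocated arrays in res in place and
+# return res; Field.initialize relies on this convention throughout the
+# test suite.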
+def computeVisco(res, x, y, z, t):
+    res[0][...] = x + y + z * t
+    return res
+
+
+def test_density():
+    """
+    TODO: write proper tests.
+    For now we only check that the discretize/setup/apply sequence runs
+    without error.
+    """
+    box = pp.Box(length=[2., 1., 0.9], origin=[0.0, -1., -0.43])
+    density = Field(domain=box, name='density')
+    viscosity = Field(domain=box, formula=computeVisco, name='visco')
+    op = DensityVisco(density, viscosity, discretization=d3d)
+    op.discretize()
+    op.setup()
+    topo = op.variables[viscosity]
+    viscosity.initialize(topo=topo)
+    simu = Simulation(nbIter=2)
+    op.apply(simu)
+
+if __name__ == "__main__":
+    test_density()
diff --git a/HySoP/hysop/operator/tests/test_diff_poisson_3D.py b/HySoP/hysop/operator/tests/test_diff_poisson_3D.py
index 0dbaf793ee47dc39635a6deedc9997522dac2dde..bb42d4e2b0922bb7865bc06dbef68bd5e7a03637 100755
--- a/HySoP/hysop/operator/tests/test_diff_poisson_3D.py
+++ b/HySoP/hysop/operator/tests/test_diff_poisson_3D.py
@@ -39,6 +39,7 @@ def computeVort(x, y, z):
 def test_Diff_Poisson():
     # Parameters
     nb = 33
-    dim = 3
     boxLength = [1., 1., 1.]
     boxMin = [0., 0., 0.]
+    from parmepy.tools.parameters import Discretization
+    d3D = Discretization([nb, nb, nb])
@@ -46,6 +47,15 @@ def test_Diff_Poisson():
 
     ## Domain
-    box = pp.Box(dim, length=boxLength, origin=boxMin)
+    box = pp.Box(length=boxLength, origin=boxMin)
 
     ## Fields
     velo = pp.Field(domain=box, formula=computeVel,
@@ -54,6 +64,7 @@ def test_Diff_Poisson():
                      name='Vorticity', isVector=True)
 
     ## FFT Diffusion operators and FFT Poisson solver
-    diffusion = Diffusion(vorti, resolutions={vorti:nbElem},
-                          viscosity=0.002
-                          )
+    diffusion = Diffusion(variables={vorti: d3D}, viscosity=0.002)
@@ -62,12 +73,22 @@ def test_Diff_Poisson():
-    poisson = Poisson(velo, vorti,
-                      resolutions={velo: nbElem,
-                                   vorti: nbElem},
-                      )
+    poisson = Poisson(velo, vorti, discretization=d3D)
 
     diffusion.discretize()
     poisson.discretize()
 
-    diffusion.setUp()
-    poisson.setUp()
+    diffusion.setup()
+    poisson.setup()
+
     simu = Simulation(tinit=0.0, tend=10., timeStep=0.002,
                       iterMax=1000000)
     diffusion.apply(simu)
diff --git a/HySoP/hysop/operator/tests/test_differential.py b/HySoP/hysop/operator/tests/test_differential.py
index 82555f83d721502f2e693ddccebfbb76bbaa14fe..c1b8d4cc829992d2597eab4fc4890308834eb07d 100644
--- a/HySoP/hysop/operator/tests/test_differential.py
+++ b/HySoP/hysop/operator/tests/test_differential.py
@@ -5,6 +5,7 @@ Tests for differential operators.
 import numpy as np
 from parmepy.domain.box import Box
 from parmepy.fields.continuous import Field
-#from parmepy.numerics.differential_operations import DivT, GradVxW
-#import math as m
-
@@ -21,17 +22,46 @@ resol2 = [129, nb, nb]
-
-def callOp(DiffOperator, ref_formula, op_dim=box.dimension, method=None,
-           order=4, dom=box, resolution=None):
+import parmepy.tools.numpywrappers as npw
+#from parmepy.numerics.differential_operations import DivT, GradVxW
+
+
+# Domain and topologies definitions
+nb = 65
+import math
+Lx = Ly = Lz = 2. * math.pi
+box = Box(length=[Lx, Ly, Lz], origin=[0., 0., 0.])
+from parmepy.tools.parameters import Discretization
+d3D = Discretization([nb, nb, nb], [2, 2, 2])
+d3D_nog = Discretization([nb, nb, nb])
+
+box2 = Box(length=[2. * Lx, Ly, Lz], origin=[0., 0., 0.])
+d3D_2 = Discretization([129, nb, nb])
+cos = np.cos
+sin = np.sin
+
+
+def callOp(DiffOperator, ref_formula, discretization,
+           op_dim=box.dimension, method=None,
+           order=4, dom=box):
     """Basic test for Grad operator using FD scheme
     (comparison with periodic analytical solution)
     """
     # Velocity and result fields
     velo = Field(domain=dom, formula=velocity_f, isVector=True)
     result = Field(domain=dom, nbComponents=op_dim)
-    if resolution is None:
-        resolution = resol
     # Curl operator
-    Op = DiffOperator(velo, result, resolutions={velo: resolution,
-                                                 result: resolution},
+    Op = DiffOperator(velo, result, discretization=discretization,
                       method=method)
 
     Op.discretize()
@@ -45,16 +75,26 @@ def callOp(DiffOperator, ref_formula, op_dim=box.dimension, method=None,
 
     # initialize velocity ...
     velo.initialize(topo=topo)
-    Op.setUp()
+    Op.setup()
 
     # Compute curl
     Op.apply()
 
     # Compare results with reference
     ind = topo.mesh.iCompute
-    err = np.zeros((dom.dimension), dtype=np.float64)
-    for i in xrange(dom.dimension):
-        err[i] = (dom.length[i] / (resolution[i] - 1)) ** order
+    err = npw.zeros((dom.dimension))
+    for i in xrange(dom.dimension):
+        err[i] = (dom.length[i] / (discretization.resolution[i] - 1)) ** order
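+    # expected accuracy: a finite-difference scheme of order 'order' on a
+    # grid of step h should converge like h**order, hence the per-direction
+    # relative tolerances computed above.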
     print ('==============')
     print (str(DiffOperator) + ' test')
     print ('==============')
@@ -63,30 +103,54 @@ def callOp(DiffOperator, ref_formula, op_dim=box.dimension, method=None,
         print (np.max(np.abs(res_d[i][ind] - ref_d[i][ind])))
         assert np.allclose(res_d[i][ind], ref_d[i][ind],
                            rtol=err[i % dom.dimension])
-    #Op.finalize()
+    Op.finalize()
 
 
 def velocity_f(res, x, y, z, t):
-    res[0][...] = np.sin(x) * np.cos(y) * np.cos(z)
-    res[1][...] = - np.cos(x) * np.sin(y) * np.cos(z)
+    res[0][...] = sin(x) * cos(y) * cos(z)
+    res[1][...] = - cos(x) * sin(y) * cos(z)
     res[2][...] = 0.
     return res
 
 
 def vorticity_f(res, x, y, z, t):
-    res[0][...] = - np.cos(x) * np.sin(y) * np.sin(z)
-    res[1][...] = - np.sin(x) * np.cos(y) * np.sin(z)
-    res[2][...] = 2. * np.sin(x) * np.sin(y) * np.cos(z)
+    res[0][...] = - cos(x) * sin(y) * sin(z)
+    res[1][...] = - sin(x) * cos(y) * sin(z)
+    res[2][...] = 2. * sin(x) * sin(y) * cos(z)
     return res
 
 
 def grad_velo(res, x, y, z, t):
-    res[0][...] = np.cos(x) * np.cos(y) * np.cos(z)
-    res[1][...] = -np.sin(x) * np.sin(y) * np.cos(z)
-    res[2][...] = -np.sin(x) * np.cos(y) * np.sin(z)
-    res[3][...] = np.sin(x) * np.sin(y) * np.cos(z)
-    res[4][...] = - np.cos(x) * np.cos(y) * np.cos(z)
-    res[5][...] = np.cos(x) * np.sin(y) * np.sin(z)
+    res[0][...] = cos(x) * cos(y) * cos(z)
+    res[1][...] = -sin(x) * sin(y) * cos(z)
+    res[2][...] = -sin(x) * cos(y) * sin(z)
+    res[3][...] = sin(x) * sin(y) * cos(z)
+    res[4][...] = - cos(x) * cos(y) * cos(z)
+    res[5][...] = cos(x) * sin(y) * sin(z)
     res[6][...] = 0.0
     res[7][...] = 0.0
     res[8][...] = 0.0
@@ -98,7 +162,11 @@ def test_CurlFD():
     from parmepy.methods import FD_C_4
     from parmepy.operator.differential import Curl
     method = {SpaceDiscretisation: FD_C_4}
-    callOp(Curl, vorticity_f, method=method, resolution=resol)
+    callOp(Curl, vorticity_f, method=method, discretization=d3D)
 
 
 def test_CurlFD2():
@@ -106,14 +174,38 @@ def test_CurlFD2():
     from parmepy.methods import FD_C_2
     from parmepy.operator.differential import Curl
     method = {SpaceDiscretisation: FD_C_2}
-    callOp(Curl, vorticity_f, method=method, order=2, resolution=resol)
+    callOp(Curl, vorticity_f, method=method, order=2, discretization=d3D)
 
 
 def test_CurlFFT():
     from parmepy.methods_keys import SpaceDiscretisation
     from parmepy.operator.differential import Curl
     method = {SpaceDiscretisation: 'fftw'}
-    callOp(Curl, vorticity_f, method=method, order=6, resolution=resol)
+    callOp(Curl, vorticity_f, method=method, order=6, discretization=d3D_nog)
+
+
+#def test_CurlFFT_ghosts():
+#    from parmepy.methods_keys import SpaceDiscretisation
+#    from parmepy.operator.differential import Curl
+#    method = {SpaceDiscretisation: 'fftw'}
+#    callOp(Curl, vorticity_f, method=method, order=6, discretization=d3D)
+
+
+#def test_CurlFFT_2():
+#    from parmepy.methods_keys import SpaceDiscretisation
+#    from parmepy.operator.differential import Curl
+#    method = {SpaceDiscretisation: 'fftw'}
+#    discr = Discretization([129, nb, nb])
+#    callOp(Curl, vorticity_f, method=method, order=6, dom=box2,
+#           discretization=discr)
 
 
 def test_Grad():
@@ -121,7 +213,12 @@ def test_Grad():
     from parmepy.methods import FD_C_2
     method = {SpaceDiscretisation: FD_C_2}
     from parmepy.operator.differential import Grad
-    callOp(Grad, grad_velo, op_dim=9, method=method, order=2, resolution=resol)
+    callOp(Grad, grad_velo, op_dim=9, method=method, order=2,
+           discretization=d3D)
 
 
 def test_Grad2():
@@ -129,7 +226,11 @@ def test_Grad2():
     from parmepy.methods_keys import SpaceDiscretisation
     from parmepy.methods import FD_C_4
     method = {SpaceDiscretisation: FD_C_4}
-    callOp(Grad, grad_velo, op_dim=9, method=method, resolution=resol)
+    callOp(Grad, grad_velo, op_dim=9, method=method, discretization=d3D)
 
 
 def test_CurlFD_2():
@@ -137,7 +238,11 @@ def test_CurlFD_2():
     from parmepy.methods import FD_C_4
     from parmepy.operator.differential import Curl
     method = {SpaceDiscretisation: FD_C_4}
-    callOp(Curl, vorticity_f, method=method, dom=box2, resolution=resol2)
+    callOp(Curl, vorticity_f, method=method, dom=box2, discretization=d3D_2)
 
 
 def test_CurlFD2_2():
@@ -146,6 +251,7 @@ def test_CurlFD2_2():
     from parmepy.operator.differential import Curl
     method = {SpaceDiscretisation: FD_C_2}
     callOp(Curl, vorticity_f, method=method, order=2, dom=box2,
-           resolution=resol2)
+           discretization=d3D_2)
-
-
@@ -155,6 +261,9 @@ def test_CurlFFT_2():
-def test_CurlFFT_2():
-    from parmepy.methods_keys import SpaceDiscretisation
-    from parmepy.operator.differential import Curl
-    method = {SpaceDiscretisation: 'fftw'}
-    callOp(Curl, vorticity_f, method=method, order=6, dom=box2,
-           resolution=resol2)
 
 
 def test_Grad_2():
@@ -163,7 +272,11 @@ def test_Grad_2():
     method = {SpaceDiscretisation: FD_C_2}
     from parmepy.operator.differential import Grad
     callOp(Grad, grad_velo, op_dim=9, method=method, order=2, dom=box2,
-           resolution=resol2)
+           discretization=d3D_2)
 
 
 def test_Grad2_2():
@@ -172,7 +285,56 @@ def test_Grad2_2():
     from parmepy.methods import FD_C_4
     method = {SpaceDiscretisation: FD_C_4}
     callOp(Grad, grad_velo, op_dim=9, method=method, dom=box2,
-           resolution=resol2)
+           discretization=d3D_2)
+
+
+def test_CurlFD_work():
+    from parmepy.methods_keys import SpaceDiscretisation
+    from parmepy.methods import FD_C_4
+    from parmepy.operator.differential import Curl
+    # Velocity and result fields
+    velo = Field(domain=box, formula=velocity_f, isVector=True)
+    result = Field(domain=box, nbComponents=box.dimension)
+    # Curl operator
+    Op = Curl(velo, result, discretization=d3D)
+
+    Op.discretize()
+    topo = Op.discreteFields[velo].topology
+
+    # Reference field
+    ref = Field(domain=box, formula=vorticity_f, nbComponents=box.dimension)
+    ref_d = ref.discretize(topo)
+    ref.initialize(topo=topo)
+    res_d = result.discreteFields[topo]
+
+    # initialize velocity ...
+    velo.initialize(topo=topo)
+    wk_p = Op.get_work_properties()
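+    # wk_p['rwork'] lists the shapes of the real work arrays required by
+    # the operator; allocating them here and passing them to setup() lets
+    # the caller own (and possibly share) these buffers.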
+    rwork = []
+    wk_length = len(wk_p['rwork'])
+    for i in xrange(wk_length):
+        memshape = wk_p['rwork'][i]
+        rwork.append(npw.zeros(memshape))
+
+    Op.setup(rwork=rwork)
+
+    # Compute curl
+    Op.apply()
+
+    # Compare results with reference
+    ind = topo.mesh.iCompute
+    err = npw.zeros((box.dimension))
+    for i in xrange(box.dimension):
+        err[i] = (box.length[i] / (d3D.resolution[i] - 1)) ** 4
+    for i in xrange(result.nbComponents):
+        print ('err = O(h**order) =', err[i % box.dimension])
+        assert np.allclose(res_d[i][ind], ref_d[i][ind],
+                           rtol=err[i % box.dimension])
+    Op.finalize()
 
 
 # This may be useful to run mpi tests
@@ -184,6 +346,13 @@ if __name__ == "__main__":
     test_Grad2()
     test_CurlFD_2()
     test_CurlFD2_2()
-    test_CurlFFT_2()
+    #test_CurlFFT_2()
     test_Grad_2()
     test_Grad2_2()
+    test_CurlFD_work()
diff --git a/HySoP/hysop/operator/tests/test_diffusion.py b/HySoP/hysop/operator/tests/test_diffusion.py
new file mode 100755
index 0000000000000000000000000000000000000000..0d4a277bc547708d4055728e7d7e68797c609a2e
--- /dev/null
+++ b/HySoP/hysop/operator/tests/test_diffusion.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+
+import parmepy as pp
+from parmepy.operator.diffusion import Diffusion
+from parmepy.problem.simulation import Simulation
+from parmepy.tools.parameters import Discretization
+import numpy as np
+import parmepy.tools.numpywrappers as npw
+import math
+pi = math.pi
+sin = np.sin
+cos = np.cos
+## Physical Domain description
+dim = 3
+LL = 2 * pi * npw.ones((dim))
+cc = 2 * pi / LL
+d3D = Discretization([33, 33, 33])
+d2D = Discretization([33, 33])
+
+
+def computeVort(res, x, y, z, t):
+    res[0][...] = sin(x * cc[0]) * sin(y * cc[1]) * cos(z * cc[2])
+    res[1][...] = cos(x * cc[0]) * sin(y * cc[1]) * sin(z * cc[2])
+    res[2][...] = cos(x * cc[0]) * cos(y * cc[1]) * sin(z * cc[2])
+    return res
+
+
+def computeVort2D(res, x, y, t):
+    # todo ...
+    res[0][...] = 4 * pi ** 2 * (cos(x * cc[0]) * sin(y * cc[1])) * \
+        (1. / LL[0] ** 2 + 1. / LL[1] ** 2)
+    return res
+
+
+def test_Diffusion3D():
+    dom = pp.Box(length=LL)
+
+    # Fields
+    vorticity = pp.Field(domain=dom, formula=computeVort,
+                         name='Vorticity', isVector=True)
+
+    # Definition of the Diffusion operator
+    diff = Diffusion(viscosity=0.3, vorticity=vorticity, discretization=d3D)
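+    # Equivalently, the field/discretization pair may be passed through a
+    # 'variables' dict, as in test_Diffusion3D_2 below.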
+    diff.discretize()
+    diff.setup()
+    topo = diff.discreteFields[vorticity].topology
+    simu = Simulation(nbIter=10)
+    vorticity.initialize(topo=topo)
+    diff.apply(simu)
+    diff.finalize()
+
+
+def test_Diffusion3D_2():
+    dom = pp.Box(length=LL)
+
+    # Fields
+    vorticity = pp.Field(domain=dom, formula=computeVort,
+                         name='Vorticity', isVector=True)
+
+    # Definition of the Diffusion operator
+    diff = Diffusion(viscosity=0.3, variables={vorticity: d3D})
+    diff.discretize()
+    diff.setup()
+    topo = diff.discreteFields[vorticity].topology
+    simu = Simulation(nbIter=10)
+    vorticity.initialize(topo=topo)
+    diff.apply(simu)
+    diff.finalize()
+
+
+def test_Diffusion2D():
+    dom = pp.Box(length=LL[:2])
+
+    # Fields
+    vorticity = pp.Field(domain=dom, formula=computeVort2D, name='Vorticity')
+
+    # Definition of the Diffusion operator
+    diff = Diffusion(viscosity=0.3, vorticity=vorticity, discretization=d2D)
+    diff.discretize()
+    diff.setup()
+    topo = diff.discreteFields[vorticity].topology
+    simu = Simulation(nbIter=10)
+    vorticity.initialize(topo=topo)
+    diff.apply(simu)
+    diff.finalize()
+
+
+# This may be useful to run mpi tests
+if __name__ == "__main__":
+    test_Diffusion3D()
+    test_Diffusion3D_2()
+    test_Diffusion2D()
diff --git a/HySoP/hysop/operator/tests/test_energy_enstrophy.py b/HySoP/hysop/operator/tests/test_energy_enstrophy.py
new file mode 100644
index 0000000000000000000000000000000000000000..1704c54e9137114226689f05afe68fe1f78aed1a
--- /dev/null
+++ b/HySoP/hysop/operator/tests/test_energy_enstrophy.py
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+import parmepy as pp
+from parmepy.operator.energy_enstrophy import EnergyEnstrophy
+from parmepy.problem.simulation import Simulation
+from parmepy.tools.parameters import Discretization
+from parmepy import Field
+import numpy as np
+from scipy.integrate import nquad
+
+d3d = Discretization([129, 129, 129])
+
+
+def computeVel(res, x, y, z, t):
+    res[0][...] = x
+    res[1][...] = y
+    res[2][...] = z
+    return res
+
+
+def computeVort(res, x, y, z, t):
+    res[0][...] = x
+    res[1][...] = y
+    res[2][...] = z
+    return res
+
+
+def energy_ref(x, y, z):
+    return x ** 2
+
+
+def init():
+    box = pp.Box(length=[2., 1., 0.9], origin=[0.0, -1., -0.43])
+    velo = Field(domain=box, formula=computeVel,
+                 name='Velocity', isVector=True)
+    vorti = Field(domain=box, formula=computeVort,
+                  name='Vorticity', isVector=True)
+    return velo, vorti
+
+
+def test_energy_enstrophy():
+    """
+    Todo : write proper tests.
+    Here we just check if discr/setup/apply process goes well.
+    """
+    dim = 3
+    velo, vorti = init()
+    op = EnergyEnstrophy(velo, vorti, discretization=d3d)
+    op.discretize()
+    op.setup()
+    topo = op.variables[velo]
+    velo.initialize(topo=topo)
+    vorti.initialize(topo=topo)
+    simu = Simulation(nbIter=2)
+    op.apply(simu)
+    intrange = []
+    box = topo.domain
+    invvol = 1. / np.prod(box.length)
+    for i in xrange(dim):
+        origin = box.origin[i]
+        end = origin + box.length[i]
+        intrange.append([origin, end])
+    intrange = 2 * intrange
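+    # With v = w = (x, y, z), energy and enstrophy reduce to the integral
+    # of x**2 + y**2 + z**2 over the box, divided by its volume. The
+    # duplicated intrange lets the slices below cyclically reorder the
+    # bounds so that energy_ref, which squares its first argument,
+    # successively plays the role of x, y and z.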
+    Eref = nquad(energy_ref, intrange[:dim])[0]
+    Eref += nquad(energy_ref, intrange[1:dim + 1])[0]
+    Eref += nquad(energy_ref, intrange[2:dim + 2])[0]
+    Eref *= invvol
+    tol = (topo.mesh.space_step).max() ** 2
+    assert abs(op.energy() - Eref * 0.5) < tol
+    assert abs(op.enstrophy() - Eref) < tol
+
+if __name__ == "__main__":
+    test_energy_enstrophy()
diff --git a/HySoP/hysop/operator/tests/test_hdf5_io.py b/HySoP/hysop/operator/tests/test_hdf5_io.py
index 707425f13dce8668af4086d2ef841d38eb666eb9..989d7e0e381be3bec68480a9bee266f219468d92 100644
--- a/HySoP/hysop/operator/tests/test_hdf5_io.py
+++ b/HySoP/hysop/operator/tests/test_hdf5_io.py
@@ -3,120 +3,399 @@
 Tests for reader/writer of fields in hdf5 format.
 """
 
-from parmepy.operator.monitors.printer import Printer
-from parmepy.operator.monitors.reader import Reader
 from parmepy import Box, Field
-from parmepy.mpi.topology import Cartesian
 import numpy as np
 import os
 from parmepy.constants import HDF5
 from parmepy.problem.simulation import Simulation
 import shutil
+from parmepy.tools.parameters import Discretization, IO_params
+from parmepy.operator.hdf_io import HDF_Writer, HDF_Reader
+import parmepy.tools.io_utils as io
+from parmepy.mpi import main_rank, main_size
 
-Lx = Ly = Lz = 2.
+Lx = 2.
 nb = 65
-workingDir = os.getcwd() + '/test_hdf5/'
+working_dir = os.getcwd() + '/test_hdf5/p' + str(main_size)
+# io.io.set_default_path(os.getcwd() + '/test_hdf5/')
+if main_rank == 0:
+    print 'I/O default path is ', io.io.default_path()
 
+cos = np.cos
+sin = np.sin
 
-def init(dim):
-    # Domain (box-shaped)
-    dom = Box(dimension=dim, length=[Lx]*dim, origin=[-1.]*dim)
+
+def init1(dim):
+    # Domain (cubic)
+    dom = Box(length=[Lx] * dim, origin=[-1.] * dim)
     # global resolution for the grid
-    resol = [nb] * dim
-    ghosts = np.zeros(dim) + 2
-    topo = Cartesian(dom, dom.dimension, resol, ghosts=ghosts)
+    resol = Discretization([nb] * dim, [2] * dim)
+    topo = dom.create_topology(discretization=resol)
+    return dom, topo
+
 
+def init2():
+    # Domain (not cubic)
+    dom = Box(length=[Lx, 2 * Lx, 3.9 * Lx], origin=[-1., 2., 3.9])
+    # global resolution for the grid
+    resol = Discretization([nb, 2 * nb, nb + 8], [2, 0, 1])
+    topo = dom.create_topology(discretization=resol)
     return dom, topo
 
 
-# Example of function to init the scalar field
 def func3D(res, x, y, z, t):
-    res[0][...] = np.cos(t * x) + np.sin(y) + z
+    res[0][...] = cos(t * x) + sin(y) + z
+    return res
+
+
+def vec3D(res, x, y, z, t):
+    res[0][...] = cos(t * x) + sin(y) + z + 0.2
+    res[1][...] = sin(t * x) + sin(y) + z + 0.3
+    res[2][...] = 3 * cos(2 * t * x) + sin(y) + y
+    return res
+
+
+def vort3D(res, x, y, z, t):
+    res[0][...] = 3 * cos(2 * t * x) + cos(y) + z
+    res[1][...] = sin(t * y) + x + 0.2
+    res[2][...] = 3 * cos(t) + sin(y) + z
     return res
 
 
 def purgeFiles():
-    shutil.rmtree(workingDir)
+    if main_rank == 0:
+        shutil.rmtree(working_dir)
 
 
 def test_write_read_scalar_3D():
-    dom, topo = init(3)
+    dom, topo = init1(3)
     scal3D = Field(domain=dom, name='Scal3D')
     scalRef = Field(domain=dom, formula=func3D, name='ScalRef3D')
-    scRef = scalRef.discretize(topo)
-    sc3D = scal3D.discretize(topo)
-    printer = Printer(variables=[scalRef], topo=topo,
-                      formattype=HDF5, prefix=workingDir+'/testIO_scal')
-    simu = Simulation(tinit=0.0, tend=2., nbIter=100, iterMax=1000000)
-    scalRef.initialize(currentTime=simu.start, topo=topo)
-    # Print scalRef for t = tinit.
-    printer.apply(simu)
+
+    filename = working_dir + '/testIO_scal'
+    iop = IO_params(filename, fileformat=HDF5)
+    op = HDF_Writer(variables={scalRef: topo}, io_params=iop)
+    simu = Simulation(nbIter=10)
+    op.discretize()
+    op.setup()
+
+    scalRef.initialize(simu.time, topo=topo)
+    op.apply(simu)
+
     simu.initialize()
     simu.advance()
     simu.advance()
-    # Print scalRef for another time
-    printer.apply(simu)
-    printer.finalize()
-    assert os.path.exists(workingDir + '/testIO_scal.xmf')
-    assert os.path.exists(workingDir + '/testIO_scal_00000.h5')
-    assert os.path.exists(workingDir + '/testIO_scal_00001.h5')
+    # Print scalRef for other iterations
+    op.apply(simu)
+    op.finalize()
+    fullpath = iop.filename
+    assert os.path.exists(fullpath + '.xmf')
+    assert os.path.exists(fullpath + '_00000.h5')
+    assert os.path.exists(fullpath + '_00001.h5')
+
     # Reader
-    reader = Reader(variables=[scal3D], topo=topo,
-                    prefix=workingDir+'/testIO_scal_00000')
-  #  print reader.dataset_names()
-  #  print reader.names
-    assert not np.allclose(sc3D.data[0][topo.mesh.iCompute],
-                           scRef.data[0][topo.mesh.iCompute])
+    iop_read = IO_params(working_dir + '/testIO_scal_00001.h5',
+                         fileformat=HDF5)
+    reader = HDF_Reader(variables=[scal3D], discretization=topo,
+                        io_params=iop_read,
+                        var_names={scal3D: 'ScalRef3D_' + str(topo.get_id())})
+    reader.discretize()
+    reader.setup()
+    sc3d = scal3D.discretization(topo)
+    scref = scalRef.discretization(topo)
+    ind = topo.mesh.iCompute
+    for d in xrange(scal3D.nbComponents):
+        sc3d.data[d][...] = 0.0
+        assert not np.allclose(scref.data[d][ind], sc3d.data[d][ind])
     reader.apply()
     reader.finalize()
-    # Check if scal3D is equal to scalRef
-    assert np.allclose(sc3D.data[0][topo.mesh.iCompute],
-                       scRef.data[0][topo.mesh.iCompute])
-    purgeFiles()
+
+    for d in xrange(scal3D.nbComponents):
+        assert np.allclose(scref.data[d][ind], sc3d.data[d][ind])
 
 
-def test_write_read_2scalar_3D():
-    dom, topo = init(3)
+def test_write_read_scalar_3D_defaults():
+    dom, topo = init1(3)
     scal3D = Field(domain=dom, name='Scal3D')
     scalRef = Field(domain=dom, formula=func3D, name='ScalRef3D')
-    scRef = scalRef.discretize(topo)
-    sc3D = scal3D.discretize(topo)
-    scal3D2 = Field(domain=dom, name='Scal3D_2')
-    scalRef2 = Field(domain=dom, formula=func3D, name='ScalRef23D')
-    scRef2 = scalRef2.discretize(topo)
-    sc3D2 = scal3D2.discretize(topo)
-    printer = Printer(variables=[scalRef, scalRef2], topo=topo,
-                      formattype=HDF5, prefix=workingDir+'/testIO_scal')
-    simu = Simulation(tinit=0.0, tend=2., nbIter=100, iterMax=1000000)
-    scalRef.initialize(currentTime=simu.start, topo=topo)
-    scalRef2.initialize(currentTime=simu.start, topo=topo)
-    scRef2[0][...] *= 3.
-    printer.apply(simu)
-    printer.finalize()
-    # Print scalRef/ScalRef2 for t = tinit.
-    # Read only scalRef2 into scal3D
-    reader = Reader(variables=[scal3D], topo=topo,
-                    prefix=workingDir+'/testIO_scal_00000',
-                    names={scal3D: 'ScalRef23D'})
-    assert not np.allclose(sc3D.data[0][topo.mesh.iCompute],
-                           scRef2.data[0][topo.mesh.iCompute])
+
+    # Write a scalar field, using default configuration for output
+    # names and location
+    op = HDF_Writer(variables={scalRef: topo})
+    simu = Simulation(nbIter=3)
+    op.discretize()
+    op.setup()
+    scal3D.discretize(topo=topo)
+    scalRef.initialize(simu.time, topo=topo)
+    simu.initialize()
+    while not simu.isOver:
+        op.apply(simu)
+        simu.advance()
+
+    op.finalize()
+    filename = scalRef.name
+    fullpath = os.path.join(io.io.default_path(), filename)
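+    # the writer appends '_' to the default name (the field name), hence
+    # the 'ScalRef3D_.xmf' and 'ScalRef3D__00000.h5' patterns below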
+
+    assert os.path.exists(fullpath + '_.xmf')
+    assert os.path.exists(fullpath + '__00000.h5')
+    assert os.path.exists(fullpath + '__00001.h5')
+
+    sc3d = scal3D.discretization(topo)
+    scref = scalRef.discretization(topo)
+    ind = topo.mesh.iCompute
+    for d in xrange(scal3D.nbComponents):
+        sc3d.data[d][...] = scref.data[d][...]
+        # reset scalRef; it is re-read from file below
+        scref.data[d][...] = 0.0
+
+    # Read a scalar field, using default configuration for output
+    # names and location, with a given iteration number.
+    reader = HDF_Reader(variables={scalRef: topo},
+                        restart=simu.currentIteration - 1)
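+    # 'restart' selects the iteration dump to read back, here the last
+    # one written in the loop above.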
+    reader.discretize()
+    reader.setup()
+    for d in xrange(scal3D.nbComponents):
+        assert not np.allclose(scref.data[d][ind], sc3d.data[d][ind])
+    reader.apply()
+    reader.finalize()
+
+    for d in xrange(scal3D.nbComponents):
+        assert np.allclose(scref.data[d][ind], sc3d.data[d][ind])
+
+
+def test_write_read_vectors_3D_defaults():
+    dom, topo = init2()
+    velo = Field(domain=dom, formula=vec3D, name='velo', isVector=True)
+    vorti = Field(domain=dom, formula=vort3D, name='vorti', isVector=True)
+
+    # Write a vector field, using default configuration for output
+    # names and location
+    op = HDF_Writer(variables={velo: topo, vorti: topo})
+    simu = Simulation(nbIter=3)
+    op.discretize()
+    op.setup()
+    velo.initialize(simu.time, topo=topo)
+    vorti.initialize(simu.time, topo=topo)
+    simu.initialize()
+    while not simu.isOver:
+        op.apply(simu)
+        simu.advance()
+
+    op.finalize()
+    filename = ''
+    for v in op.input:
+        filename += v.name
+        filename += '_'
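+    # default output name: concatenation of the input field names, each
+    # followed by '_' ('velo_vorti_' here)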
+    fullpath = os.path.join(io.io.default_path(), filename)
+
+    assert os.path.exists(fullpath + '.xmf')
+    assert os.path.exists(fullpath + '_00000.h5')
+    assert os.path.exists(fullpath + '_00001.h5')
+
+    v3d = velo.discretization(topo)
+    w3d = vorti.discretization(topo)
+    ind = topo.mesh.iCompute
+
+    buff1 = Field(domain=dom, name='buff1', isVector=True)
+    buff2 = Field(domain=dom, name='buff2', isVector=True)
+    b1 = buff1.discretize(topo=topo)
+    b2 = buff2.discretize(topo=topo)
+    for d in xrange(velo.nbComponents):
+        b1.data[d][...] = v3d.data[d][...]
+        b2.data[d][...] = w3d.data[d][...]
+
+        v3d.data[d][...] = 0.0
+        w3d.data[d][...] = 0.0
+
+    # Read vector fields, using default configuration for output
+    # names and location, with a given iteration number.
+    reader = HDF_Reader(variables={velo: topo, vorti: topo},
+                        restart=simu.currentIteration - 1)
+    reader.discretize()
+    reader.setup()
+    for d in xrange(v3d.nbComponents):
+        assert not np.allclose(b1.data[d][ind], v3d.data[d][ind])
+        assert not np.allclose(b2.data[d][ind], w3d.data[d][ind])
+
     reader.apply()
     reader.finalize()
-    # Check if scal3D is equal to scalRef
-    assert np.allclose(sc3D.data[0][topo.mesh.iCompute],
-                       scRef2.data[0][topo.mesh.iCompute])
-    sc3D[0][...] = 0.0
-    # Read scalRef and scalRef2 into scal3D and scal3D2
-    reader2 = Reader(variables=[scal3D, scal3D2], topo=topo,
-                     prefix=workingDir+'/testIO_scal_00000',
-                     names={scal3D: 'ScalRef3D', scal3D2: 'ScalRef23D'})
-    #    assert not np.allclose(sc3D.data[0][topo.mesh.iCompute],
-    #                       scRef2.data[0][topo.mesh.iCompute])
-    reader2.apply()
-    reader2.finalize()
-    # Check if scal3D is equal to scalRef and scal3D2 == scalRef2
-    assert np.allclose(sc3D.data[0][topo.mesh.iCompute],
-                       scRef.data[0][topo.mesh.iCompute])
-    assert np.allclose(sc3D2.data[0][topo.mesh.iCompute],
-                       scRef2.data[0][topo.mesh.iCompute])
+    for d in xrange(v3d.nbComponents):
+        assert np.allclose(b1.data[d][ind], v3d.data[d][ind])
+        assert np.allclose(b2.data[d][ind], w3d.data[d][ind])
+
+
+def test_write_read_vectors_3D():
+    dom, topo = init2()
+    velo = Field(domain=dom, formula=vec3D, name='velo', isVector=True)
+    vorti = Field(domain=dom, formula=vort3D, name='vorti', isVector=True)
+
+    # Write a vector field, using default for output location
+    # but with fixed names for datasets
+    filename = working_dir + '/testIO_vec'
+    iop = IO_params(filename, fileformat=HDF5)
+    op = HDF_Writer(variables={velo: topo, vorti: topo},
+                    var_names={velo: 'io_1', vorti: 'io_2'}, io_params=iop)
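+    # var_names fixes the dataset names inside the h5 files ('io_1' and
+    # 'io_2') instead of deriving them from the field names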
+    simu = Simulation(nbIter=3)
+    op.discretize()
+    op.setup()
+
+    velo.initialize(simu.time, topo=topo)
+    vorti.initialize(simu.time, topo=topo)
+    simu.initialize()
+    while not simu.isOver:
+        op.apply(simu)
+        simu.advance()
+
+    op.finalize()
+
+    fullpath = iop.filename
+    assert os.path.exists(fullpath + '.xmf')
+    assert os.path.exists(fullpath + '_00000.h5')
+    assert os.path.exists(fullpath + '_00001.h5')
+
+    v3d = velo.discretization(topo)
+    w3d = vorti.discretization(topo)
+    ind = topo.mesh.iCompute
+
+    buff1 = Field(domain=dom, name='buff1', isVector=True)
+    buff2 = Field(domain=dom, name='buff2', isVector=True)
+
+    # Read vector fields, fixed filename, fixed dataset names.
+    iop_read = IO_params(working_dir + '/testIO_vec_00001.h5',
+                         fileformat=HDF5)
+    reader = HDF_Reader(variables={buff1: topo, buff2: topo},
+                        io_params=iop_read,
+                        var_names={buff1: 'io_2', buff2: 'io_1'})
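+    # dataset names are swapped on purpose: buff1 reads the vorticity
+    # data ('io_2') and buff2 the velocity data ('io_1'); the asserts
+    # below check this crossover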
+    reader.discretize()
+    reader.setup()
+    reader.apply()
+    reader.finalize()
+    b1 = buff1.discretization(topo)
+    b2 = buff2.discretization(topo)
+    for d in xrange(v3d.nbComponents):
+        assert np.allclose(b2.data[d][ind], v3d.data[d][ind])
+        assert np.allclose(b1.data[d][ind], w3d.data[d][ind])
+
+
+def test_write_read_subset_1():
+    dom, topo = init2()
+    velo = Field(domain=dom, formula=vec3D, name='velo', isVector=True)
+
+    # A subset of the current domain
+    from parmepy.domain.subsets.boxes import SubBox
+    mybox = SubBox(origin=[-0.5, 2.3, 4.1], length=[Lx / 2, Lx / 3, Lx],
+                   parent=dom)
+    # Write a vector field, using default for output location
+    # but with fixed names for datasets
+    op = HDF_Writer(variables={velo: topo}, var_names={velo: 'io_1'},
+                    subset=mybox)
+    simu = Simulation(nbIter=3)
+    op.discretize()
+    op.setup()
+    velo.initialize(simu.time, topo=topo)
+    simu.initialize()
+    while not simu.isOver:
+        op.apply(simu)
+        simu.advance()
+    op.finalize()
+
+    filename = ''
+    for v in op.input:
+        filename += v.name
+        filename += '_'
+    fullpath = os.path.join(io.io.default_path(), filename)
+
+    assert os.path.exists(fullpath + '.xmf')
+    assert os.path.exists(fullpath + '_00000.h5')
+    assert os.path.exists(fullpath + '_00001.h5')
+
+    v3d = velo.discretize(topo)
+    ind = topo.mesh.iCompute
+    indsubset = mybox.mesh[topo].iCompute
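+    # only the subbox part of the field is written to file, so after
+    # reading back, values must match on the subset indices but not on
+    # the whole domain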
+
+    buff1 = Field(domain=dom, name='buff1', isVector=True)
+
+    # Read vector fields, fixed filename, fixed dataset names.
+    iop = IO_params(filename + '_00000.h5', fileformat=HDF5)
+    reader = HDF_Reader(variables={buff1: topo},
+                        io_params=iop,
+                        var_names={buff1: 'io_1'}, subset=mybox)
+    reader.discretize()
+    reader.setup()
+    reader.apply()
+    reader.finalize()
+    b1 = buff1.discretization(topo)
+    for d in xrange(v3d.nbComponents):
+        assert not np.allclose(b1.data[d][ind], v3d.data[d][ind])
+        assert np.allclose(b1.data[d][indsubset], v3d.data[d][indsubset])
+
+
+def test_write_read_subset_2():
+    dom, topo = init2()
+    velo = Field(domain=dom, formula=vec3D, name='velo', isVector=True)
+
+    # A subset of the current domain
+    from parmepy.domain.subsets.boxes import SubBox
+    # a plane ...
+    mybox = SubBox(origin=[-0.5, 2.3, 4.1], length=[Lx / 2, Lx / 3, 0.0],
+                   parent=dom)
+    # Write a vector field, using default for output location
+    # but with fixed names for datasets
+    op = HDF_Writer(variables={velo: topo}, var_names={velo: 'io_1'},
+                    subset=mybox)
+    simu = Simulation(nbIter=3)
+    op.discretize()
+    op.setup()
+    velo.initialize(simu.time, topo=topo)
+    simu.initialize()
+    while not simu.isOver:
+        op.apply(simu)
+        simu.advance()
+    op.finalize()
+
+    filename = ''
+    for v in op.input:
+        filename += v.name
+        filename += '_'
+    fullpath = os.path.join(io.io.default_path(), filename)
+
+    assert os.path.exists(fullpath + '.xmf')
+    assert os.path.exists(fullpath + '_00000.h5')
+    assert os.path.exists(fullpath + '_00001.h5')
+
+    v3d = velo.discretize(topo)
+    ind = topo.mesh.iCompute
+    indsubset = mybox.mesh[topo].iCompute
+
+    buff1 = Field(domain=dom, name='buff1', isVector=True)
+
+    # Read vector fields, fixed filename, fixed dataset names.
+    iop = IO_params(filename + '_00000.h5', fileformat=HDF5)
+    reader = HDF_Reader(variables={buff1: topo},
+                        io_params=iop,
+                        var_names={buff1: 'io_1'}, subset=mybox)
+    reader.discretize()
+    reader.setup()
+    reader.apply()
+    reader.finalize()
+    b1 = buff1.discretization(topo)
+    for d in xrange(v3d.nbComponents):
+        assert not np.allclose(b1.data[d][ind], v3d.data[d][ind])
+        assert np.allclose(b1.data[d][indsubset], v3d.data[d][indsubset])
+
+
+# This may be useful to run mpi tests
+if __name__ == "__main__":
+    test_write_read_scalar_3D()
+    test_write_read_scalar_3D_defaults()
+    test_write_read_vectors_3D_defaults()
+    test_write_read_vectors_3D()
+    test_write_read_subset_1()
+    test_write_read_subset_2()
+    if main_rank == 0:
+        shutil.rmtree(io.io.default_path())
     purgeFiles()
diff --git a/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/ScalRef3D_.xmf b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/ScalRef3D_.xmf
new file mode 100644
index 0000000000000000000000000000000000000000..5a79944cd588471e28e65fce8ddafab0611a109f
--- /dev/null
+++ b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/ScalRef3D_.xmf
@@ -0,0 +1,59 @@
+<?xml version="1.0" ?>
+<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd">
+<Xdmf Version="2.0">
+ <Domain>
+  <Grid Name="CellTime" GridType="Collection" CollectionType="Temporal">
+   <Grid Name="Iteration 000" GridType="Uniform">
+    <Time Value="0.333333333333" />
+    <Topology TopologyType="3DCORECTMesh" NumberOfElements="64  64  64 "/>
+    <Geometry GeometryType="ORIGIN_DXDYDZ">
+     <DataItem Dimensions="3 " NumberType="Float" Precision="4" Format="XML">
+     -1.0  -1.0  -1.0
+     </DataItem>
+     <DataItem Dimensions="3 " NumberType="Float" Precision="8" Format="XML">
+     0.03125  0.03125  0.03125
+     </DataItem>
+    </Geometry>
+    <Attribute Name="ScalRef3D_1_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="64  64  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      ScalRef3D__00000.h5:/ScalRef3D_1_X
+     </DataItem>
+    </Attribute>
+   </Grid>
+   <Grid Name="Iteration 001" GridType="Uniform">
+    <Time Value="0.666666666667" />
+    <Topology TopologyType="3DCORECTMesh" NumberOfElements="64  64  64 "/>
+    <Geometry GeometryType="ORIGIN_DXDYDZ">
+     <DataItem Dimensions="3 " NumberType="Float" Precision="4" Format="XML">
+     -1.0  -1.0  -1.0
+     </DataItem>
+     <DataItem Dimensions="3 " NumberType="Float" Precision="8" Format="XML">
+     0.03125  0.03125  0.03125
+     </DataItem>
+    </Geometry>
+    <Attribute Name="ScalRef3D_1_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="64  64  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      ScalRef3D__00001.h5:/ScalRef3D_1_X
+     </DataItem>
+    </Attribute>
+   </Grid>
+   <Grid Name="Iteration 002" GridType="Uniform">
+    <Time Value="1.0" />
+    <Topology TopologyType="3DCORECTMesh" NumberOfElements="64  64  64 "/>
+    <Geometry GeometryType="ORIGIN_DXDYDZ">
+     <DataItem Dimensions="3 " NumberType="Float" Precision="4" Format="XML">
+     -1.0  -1.0  -1.0
+     </DataItem>
+     <DataItem Dimensions="3 " NumberType="Float" Precision="8" Format="XML">
+     0.03125  0.03125  0.03125
+     </DataItem>
+    </Geometry>
+    <Attribute Name="ScalRef3D_1_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="64  64  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      ScalRef3D__00002.h5:/ScalRef3D_1_X
+     </DataItem>
+    </Attribute>
+   </Grid>
+  </Grid>
+ </Domain>
+</Xdmf>
diff --git a/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/ScalRef3D__00000.h5 b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/ScalRef3D__00000.h5
new file mode 100644
index 0000000000000000000000000000000000000000..70243a0dfc538b43f9c0e77b388e7c39d70b7ef4
Binary files /dev/null and b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/ScalRef3D__00000.h5 differ
diff --git a/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/ScalRef3D__00001.h5 b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/ScalRef3D__00001.h5
new file mode 100644
index 0000000000000000000000000000000000000000..70243a0dfc538b43f9c0e77b388e7c39d70b7ef4
Binary files /dev/null and b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/ScalRef3D__00001.h5 differ
diff --git a/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/ScalRef3D__00002.h5 b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/ScalRef3D__00002.h5
new file mode 100644
index 0000000000000000000000000000000000000000..70243a0dfc538b43f9c0e77b388e7c39d70b7ef4
Binary files /dev/null and b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/ScalRef3D__00002.h5 differ
diff --git a/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/testIO_scal.xmf b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/testIO_scal.xmf
new file mode 100644
index 0000000000000000000000000000000000000000..167c44c016c1bae9db38c54b879c7400eaa3d5f1
--- /dev/null
+++ b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/testIO_scal.xmf
@@ -0,0 +1,42 @@
+<?xml version="1.0" ?>
+<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd">
+<Xdmf Version="2.0">
+ <Domain>
+  <Grid Name="CellTime" GridType="Collection" CollectionType="Temporal">
+   <Grid Name="Iteration 000" GridType="Uniform">
+    <Time Value="0.0" />
+    <Topology TopologyType="3DCORECTMesh" NumberOfElements="64  64  64 "/>
+    <Geometry GeometryType="ORIGIN_DXDYDZ">
+     <DataItem Dimensions="3 " NumberType="Float" Precision="4" Format="XML">
+     -1.0  -1.0  -1.0
+     </DataItem>
+     <DataItem Dimensions="3 " NumberType="Float" Precision="8" Format="XML">
+     0.03125  0.03125  0.03125
+     </DataItem>
+    </Geometry>
+    <Attribute Name="ScalRef3D_0_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="64  64  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      testIO_scal_00000.h5:/ScalRef3D_0_X
+     </DataItem>
+    </Attribute>
+   </Grid>
+   <Grid Name="Iteration 001" GridType="Uniform">
+    <Time Value="0.3" />
+    <Topology TopologyType="3DCORECTMesh" NumberOfElements="64  64  64 "/>
+    <Geometry GeometryType="ORIGIN_DXDYDZ">
+     <DataItem Dimensions="3 " NumberType="Float" Precision="4" Format="XML">
+     -1.0  -1.0  -1.0
+     </DataItem>
+     <DataItem Dimensions="3 " NumberType="Float" Precision="8" Format="XML">
+     0.03125  0.03125  0.03125
+     </DataItem>
+    </Geometry>
+    <Attribute Name="ScalRef3D_0_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="64  64  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      testIO_scal_00001.h5:/ScalRef3D_0_X
+     </DataItem>
+    </Attribute>
+   </Grid>
+  </Grid>
+ </Domain>
+</Xdmf>
diff --git a/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/testIO_scal_00000.h5 b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/testIO_scal_00000.h5
new file mode 100644
index 0000000000000000000000000000000000000000..286edb5df96691b9883828f22b53c311d1ba3f24
Binary files /dev/null and b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/testIO_scal_00000.h5 differ
diff --git a/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/testIO_scal_00001.h5 b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/testIO_scal_00001.h5
new file mode 100644
index 0000000000000000000000000000000000000000..286edb5df96691b9883828f22b53c311d1ba3f24
Binary files /dev/null and b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/testIO_scal_00001.h5 differ
diff --git a/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/velo_vorti_.xmf b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/velo_vorti_.xmf
new file mode 100644
index 0000000000000000000000000000000000000000..3f1c020b1356959c80ce6649726c0aeb9350ab51
--- /dev/null
+++ b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/velo_vorti_.xmf
@@ -0,0 +1,134 @@
+<?xml version="1.0" ?>
+<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd">
+<Xdmf Version="2.0">
+ <Domain>
+  <Grid Name="CellTime" GridType="Collection" CollectionType="Temporal">
+   <Grid Name="Iteration 000" GridType="Uniform">
+    <Time Value="0.333333333333" />
+    <Topology TopologyType="3DCORECTMesh" NumberOfElements="72  129  64 "/>
+    <Geometry GeometryType="ORIGIN_DXDYDZ">
+     <DataItem Dimensions="3 " NumberType="Float" Precision="4" Format="XML">
+     3.8999999999999999  2.0  -1.0
+     </DataItem>
+     <DataItem Dimensions="3 " NumberType="Float" Precision="8" Format="XML">
+     0.10833333333333334  0.031007751937984496  0.03125
+     </DataItem>
+    </Geometry>
+    <Attribute Name="velo_2_Z" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      velo_vorti__00000.h5:/velo_2_Z
+     </DataItem>
+    </Attribute>
+    <Attribute Name="velo_2_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      velo_vorti__00000.h5:/velo_2_X
+     </DataItem>
+    </Attribute>
+    <Attribute Name="velo_2_Y" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      velo_vorti__00000.h5:/velo_2_Y
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vorti_2_Z" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      velo_vorti__00000.h5:/vorti_2_Z
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vorti_2_Y" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      velo_vorti__00000.h5:/vorti_2_Y
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vorti_2_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      velo_vorti__00000.h5:/vorti_2_X
+     </DataItem>
+    </Attribute>
+   </Grid>
+   <Grid Name="Iteration 001" GridType="Uniform">
+    <Time Value="0.666666666667" />
+    <Topology TopologyType="3DCORECTMesh" NumberOfElements="72  129  64 "/>
+    <Geometry GeometryType="ORIGIN_DXDYDZ">
+     <DataItem Dimensions="3 " NumberType="Float" Precision="4" Format="XML">
+     3.8999999999999999  2.0  -1.0
+     </DataItem>
+     <DataItem Dimensions="3 " NumberType="Float" Precision="8" Format="XML">
+     0.10833333333333334  0.031007751937984496  0.03125
+     </DataItem>
+    </Geometry>
+    <Attribute Name="velo_2_Z" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      velo_vorti__00001.h5:/velo_2_Z
+     </DataItem>
+    </Attribute>
+    <Attribute Name="velo_2_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      velo_vorti__00001.h5:/velo_2_X
+     </DataItem>
+    </Attribute>
+    <Attribute Name="velo_2_Y" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      velo_vorti__00001.h5:/velo_2_Y
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vorti_2_Z" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      velo_vorti__00001.h5:/vorti_2_Z
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vorti_2_Y" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      velo_vorti__00001.h5:/vorti_2_Y
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vorti_2_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      velo_vorti__00001.h5:/vorti_2_X
+     </DataItem>
+    </Attribute>
+   </Grid>
+   <Grid Name="Iteration 002" GridType="Uniform">
+    <Time Value="1.0" />
+    <Topology TopologyType="3DCORECTMesh" NumberOfElements="72  129  64 "/>
+    <Geometry GeometryType="ORIGIN_DXDYDZ">
+     <DataItem Dimensions="3 " NumberType="Float" Precision="4" Format="XML">
+     3.8999999999999999  2.0  -1.0
+     </DataItem>
+     <DataItem Dimensions="3 " NumberType="Float" Precision="8" Format="XML">
+     0.10833333333333334  0.031007751937984496  0.03125
+     </DataItem>
+    </Geometry>
+    <Attribute Name="velo_2_Z" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      velo_vorti__00002.h5:/velo_2_Z
+     </DataItem>
+    </Attribute>
+    <Attribute Name="velo_2_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      velo_vorti__00002.h5:/velo_2_X
+     </DataItem>
+    </Attribute>
+    <Attribute Name="velo_2_Y" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      velo_vorti__00002.h5:/velo_2_Y
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vorti_2_Z" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      velo_vorti__00002.h5:/vorti_2_Z
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vorti_2_Y" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      velo_vorti__00002.h5:/vorti_2_Y
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vorti_2_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      velo_vorti__00002.h5:/vorti_2_X
+     </DataItem>
+    </Attribute>
+   </Grid>
+  </Grid>
+ </Domain>
+</Xdmf>
diff --git a/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/velo_vorti__00000.h5 b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/velo_vorti__00000.h5
new file mode 100644
index 0000000000000000000000000000000000000000..4840283fbfb035c18e3430a02e61d10f7b96c06f
Binary files /dev/null and b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/velo_vorti__00000.h5 differ
diff --git a/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/velo_vorti__00001.h5 b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/velo_vorti__00001.h5
new file mode 100644
index 0000000000000000000000000000000000000000..51b2234556750f019ae940f5f13dc60a7ecef63c
Binary files /dev/null and b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/velo_vorti__00001.h5 differ
diff --git a/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/velo_vorti__00002.h5 b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/velo_vorti__00002.h5
new file mode 100644
index 0000000000000000000000000000000000000000..ab2b0b2ec34470101e75459841fd9b7df1808254
Binary files /dev/null and b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/velo_vorti__00002.h5 differ
diff --git a/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/vorti_velo_.xmf b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/vorti_velo_.xmf
new file mode 100644
index 0000000000000000000000000000000000000000..dfc8f1ed658e69d3bd66fc3310b759f8192f1541
--- /dev/null
+++ b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/vorti_velo_.xmf
@@ -0,0 +1,134 @@
+<?xml version="1.0" ?>
+<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd">
+<Xdmf Version="2.0">
+ <Domain>
+  <Grid Name="CellTime" GridType="Collection" CollectionType="Temporal">
+   <Grid Name="Iteration 000" GridType="Uniform">
+    <Time Value="0.333333333333" />
+    <Topology TopologyType="3DCORECTMesh" NumberOfElements="72  129  64 "/>
+    <Geometry GeometryType="ORIGIN_DXDYDZ">
+     <DataItem Dimensions="3 " NumberType="Float" Precision="4" Format="XML">
+     3.8999999999999999  2.0  -1.0
+     </DataItem>
+     <DataItem Dimensions="3 " NumberType="Float" Precision="8" Format="XML">
+     0.10833333333333334  0.031007751937984496  0.03125
+     </DataItem>
+    </Geometry>
+    <Attribute Name="velo_2_Z" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vorti_velo__00000.h5:/velo_2_Z
+     </DataItem>
+    </Attribute>
+    <Attribute Name="velo_2_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vorti_velo__00000.h5:/velo_2_X
+     </DataItem>
+    </Attribute>
+    <Attribute Name="velo_2_Y" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vorti_velo__00000.h5:/velo_2_Y
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vorti_2_Z" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vorti_velo__00000.h5:/vorti_2_Z
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vorti_2_Y" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vorti_velo__00000.h5:/vorti_2_Y
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vorti_2_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vorti_velo__00000.h5:/vorti_2_X
+     </DataItem>
+    </Attribute>
+   </Grid>
+   <Grid Name="Iteration 001" GridType="Uniform">
+    <Time Value="0.666666666667" />
+    <Topology TopologyType="3DCORECTMesh" NumberOfElements="72  129  64 "/>
+    <Geometry GeometryType="ORIGIN_DXDYDZ">
+     <DataItem Dimensions="3 " NumberType="Float" Precision="4" Format="XML">
+     3.8999999999999999  2.0  -1.0
+     </DataItem>
+     <DataItem Dimensions="3 " NumberType="Float" Precision="8" Format="XML">
+     0.10833333333333334  0.031007751937984496  0.03125
+     </DataItem>
+    </Geometry>
+    <Attribute Name="velo_2_Z" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vorti_velo__00001.h5:/velo_2_Z
+     </DataItem>
+    </Attribute>
+    <Attribute Name="velo_2_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vorti_velo__00001.h5:/velo_2_X
+     </DataItem>
+    </Attribute>
+    <Attribute Name="velo_2_Y" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vorti_velo__00001.h5:/velo_2_Y
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vorti_2_Z" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vorti_velo__00001.h5:/vorti_2_Z
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vorti_2_Y" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vorti_velo__00001.h5:/vorti_2_Y
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vorti_2_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vorti_velo__00001.h5:/vorti_2_X
+     </DataItem>
+    </Attribute>
+   </Grid>
+   <Grid Name="Iteration 002" GridType="Uniform">
+    <Time Value="1.0" />
+    <Topology TopologyType="3DCORECTMesh" NumberOfElements="72  129  64 "/>
+    <Geometry GeometryType="ORIGIN_DXDYDZ">
+     <DataItem Dimensions="3 " NumberType="Float" Precision="4" Format="XML">
+     3.8999999999999999  2.0  -1.0
+     </DataItem>
+     <DataItem Dimensions="3 " NumberType="Float" Precision="8" Format="XML">
+     0.10833333333333334  0.031007751937984496  0.03125
+     </DataItem>
+    </Geometry>
+    <Attribute Name="velo_2_Z" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vorti_velo__00002.h5:/velo_2_Z
+     </DataItem>
+    </Attribute>
+    <Attribute Name="velo_2_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vorti_velo__00002.h5:/velo_2_X
+     </DataItem>
+    </Attribute>
+    <Attribute Name="velo_2_Y" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vorti_velo__00002.h5:/velo_2_Y
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vorti_2_Z" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vorti_velo__00002.h5:/vorti_2_Z
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vorti_2_Y" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vorti_velo__00002.h5:/vorti_2_Y
+     </DataItem>
+    </Attribute>
+    <Attribute Name="vorti_2_X" AttributeType="Scalar" Center="Node">
+     <DataItem Dimensions="72  129  64 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
+      vorti_velo__00002.h5:/vorti_2_X
+     </DataItem>
+    </Attribute>
+   </Grid>
+  </Grid>
+ </Domain>
+</Xdmf>
diff --git a/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/vorti_velo__00000.h5 b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/vorti_velo__00000.h5
new file mode 100644
index 0000000000000000000000000000000000000000..d541a445fbc9c6f039fc56f5c63e0978eb68111d
Binary files /dev/null and b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/vorti_velo__00000.h5 differ
diff --git a/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/vorti_velo__00001.h5 b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/vorti_velo__00001.h5
new file mode 100644
index 0000000000000000000000000000000000000000..d541a445fbc9c6f039fc56f5c63e0978eb68111d
Binary files /dev/null and b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/vorti_velo__00001.h5 differ
diff --git a/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/vorti_velo__00002.h5 b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/vorti_velo__00002.h5
new file mode 100644
index 0000000000000000000000000000000000000000..22a8bdf14614e8a331786fa00acbb75f185d86be
Binary files /dev/null and b/HySoP/hysop/operator/tests/test_hdf5_io_flymake/p1/vorti_velo__00002.h5 differ
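
For reference, each XDMF grid above indexes datasets stored in the companion HDF5 files. A minimal sketch of reading one of them back, assuming only that h5py is available in the test environment:

```python
import h5py

# 'vorti_velo__00000.h5' and '/velo_2_Z' are the file and dataset names
# listed in the XDMF index above.
with h5py.File('vorti_velo__00000.h5', 'r') as f:
    velo_z = f['/velo_2_Z'][...]   # numpy array, shape (72, 129, 64)
```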
diff --git a/HySoP/hysop/operator/tests/test_particle_advection.py b/HySoP/hysop/operator/tests/test_particle_advection.py
index e8c5838ddb1bc0dc4ac05105fe2ad25ef7fd6632..103a6b871433dfecbe3394b4411a4c430a470f80 100644
--- a/HySoP/hysop/operator/tests/test_particle_advection.py
+++ b/HySoP/hysop/operator/tests/test_particle_advection.py
@@ -7,12 +7,19 @@ from parmepy.fields.continuous import Field
 from parmepy.operator.advection import Advection
 from parmepy.constants import ORDER, PARMES_REAL
 from parmepy.problem.simulation import Simulation
+from parmepy.tools.parameters import Discretization
 import parmepy.tools.numpywrappers as npw
 import numpy as np
 
 
+d2d = Discretization([17, 17])
+d3d = Discretization([17, 17, 17])
+
+
 def setup_2D():
-    box = Box(2, length=[2., 2.], origin=[-1., -1.])
+    box = Box(length=[2., 2.], origin=[-1., -1.])
     scal = Field(domain=box, name='Scalar')
     velo = Field(domain=box, name='Velocity',
                  formula=lambda x, y: (0., 0.), isVector=True)
@@ -20,7 +27,7 @@ def setup_2D():
 
 
 def setup_vector_2D():
-    box = Box(2, length=[2., 2.], origin=[-1., -1.])
+    box = Box(length=[2., 2.], origin=[-1., -1.])
     scal = Field(domain=box, name='Vector', isVector=True)
     velo = Field(domain=box, name='Velocity',
                  formula=lambda x, y: (0., 0.), isVector=True)
@@ -28,7 +35,7 @@ def setup_vector_2D():
 
 
 def setup_list_2D():
-    box = Box(2, length=[2., 2.], origin=[-1., -1.])
+    box = Box(length=[2., 2.], origin=[-1., -1.])
     scal1 = Field(domain=box, name='Scalar1')
     scal2 = Field(domain=box, name='Scalar2')
     velo = Field(domain=box, name='Velocity',
@@ -37,7 +44,7 @@ def setup_list_2D():
 
 
 def setup_3D():
-    box = Box(3, length=[2., 4., 1.], origin=[-1., -2., 0.])
+    box = Box(length=[2., 4., 1.], origin=[-1., -2., 0.])
     scal = Field(domain=box, name='Scalar')
     velo = Field(domain=box, name='Velocity',
                  formula=lambda x, y, z: (0., 0., 0.), isVector=True)
@@ -45,7 +52,7 @@ def setup_3D():
 
 
 def setup_vector_3D():
-    box = Box(3, length=[2., 4., 1.], origin=[-1., -2., 0.])
+    box = Box(length=[2., 4., 1.], origin=[-1., -2., 0.])
     scal = Field(domain=box, name='Scalar', isVector=True)
     velo = Field(domain=box, name='Velocity',
                  formula=lambda x, y, z: (0., 0., 0.), isVector=True)
@@ -53,7 +60,7 @@ def setup_vector_3D():
 
 
 def setup_list_3D():
-    box = Box(3, length=[2., 4., 1.], origin=[-1., -2., 0.])
+    box = Box(length=[2., 4., 1.], origin=[-1., -2., 0.])
     scal1 = Field(domain=box, name='Scalar1')
     scal2 = Field(domain=box, name='Scalar2')
     velo = Field(domain=box, name='Velocity',
@@ -61,12 +68,22 @@ def setup_list_3D():
     return [scal1, scal2], velo
 
 
+def setup_dict_3D():
+    box = Box(length=[2., 4., 1.], origin=[-1., -2., 0.])
+    scal1 = Field(domain=box, name='Scalar1')
+    scal2 = Field(domain=box, name='Scalar2')
+    velo = Field(domain=box, name='Velocity',
+                 formula=lambda x, y, z: (0., 0., 0.), isVector=True)
+    return {scal1: d3d, scal2: d3d}, velo
+
+
 def assertion(scal, advec):
     advec.discretize()
-    advec.setUp()
+    advec.setup()
     scal_d = scal.discreteFields.values()[0]
-    scal_d.data[0][...] = np.asarray(np.random.random(scal_d.data[0].shape),
-                                     dtype=PARMES_REAL, order=ORDER)
+    scal_d.data[0][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape))
     scal_init = npw.copy(scal_d.data[0])
 
     advec.apply(Simulation(tinit=0., tend=0.01, nbIter=1))
@@ -75,13 +92,13 @@ def assertion(scal, advec):
 
 def assertion_vector2D(scal, advec):
     advec.discretize()
-    advec.setUp()
+    advec.setup()
 
     scal_d = scal.discreteFields.values()[0]
-    scal_d.data[0][...] = np.asarray(np.random.random(scal_d.data[0].shape),
-                                     dtype=PARMES_REAL, order=ORDER)
-    scal_d.data[1][...] = np.asarray(np.random.random(scal_d.data[1].shape),
-                                     dtype=PARMES_REAL, order=ORDER)
+    scal_d.data[0][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape))
+    scal_d.data[1][...] = npw.asrealarray(
+        np.random.random(scal_d.data[1].shape))
     scal1_init = npw.copy(scal_d.data[0])
     scal2_init = npw.copy(scal_d.data[1])
 
@@ -94,15 +111,15 @@ def assertion_vector2D(scal, advec):
 
 def assertion_vector3D(scal, advec):
     advec.discretize()
-    advec.setUp()
+    advec.setup()
 
     scal_d = scal.discreteFields.values()[0]
-    scal_d.data[0][...] = np.asarray(np.random.random(scal_d.data[0].shape),
-                                     dtype=PARMES_REAL, order=ORDER)
-    scal_d.data[1][...] = np.asarray(np.random.random(scal_d.data[1].shape),
-                                     dtype=PARMES_REAL, order=ORDER)
-    scal_d.data[2][...] = np.asarray(np.random.random(scal_d.data[2].shape),
-                                     dtype=PARMES_REAL, order=ORDER)
+    scal_d.data[0][...] = npw.asrealarray(
+        np.random.random(scal_d.data[0].shape))
+    scal_d.data[1][...] = npw.asrealarray(np.random.random(
+        scal_d.data[1].shape))
+    scal_d.data[2][...] = npw.asrealarray(
+        np.random.random(scal_d.data[2].shape))
     scal1_init = npw.copy(scal_d.data[0])
     scal2_init = npw.copy(scal_d.data[1])
     scal3_init = npw.copy(scal_d.data[2])
@@ -115,14 +132,14 @@ def assertion_vector3D(scal, advec):
 
 def assertion_list(scal, advec):
     advec.discretize()
-    advec.setUp()
+    advec.setup()
 
     scal1_d = scal[0].discreteFields.values()[0]
     scal2_d = scal[1].discreteFields.values()[0]
-    scal1_d.data[0][...] = np.asarray(np.random.random(scal1_d.data[0].shape),
-                                      dtype=PARMES_REAL, order=ORDER)
-    scal2_d.data[0][...] = np.asarray(np.random.random(scal2_d.data[0].shape),
-                                      dtype=PARMES_REAL, order=ORDER)
+    scal1_d.data[0][...] = npw.asrealarray(
+        np.random.random(scal1_d.data[0].shape))
+    scal2_d.data[0][...] = npw.asrealarray(
+        np.random.random(scal2_d.data[0].shape))
     scal1_init = npw.copy(scal1_d.data[0])
     scal2_init = npw.copy(scal2_d.data[0])
 
@@ -138,10 +155,8 @@ def test_nullVelocity_2D():
     """
     scal, velo = setup_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17],
-                                   scal: [17, 17]},
-                      )
+    advec = Advection(velo, scal, discretization=d2d)
+
     assert assertion(scal, advec)
 
 
@@ -150,10 +165,7 @@ def test_nullVelocity_vector_2D():
     """
     scal, velo = setup_vector_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17],
-                                   scal: [17, 17]},
-                      )
+    advec = Advection(velo, scal, discretization=d2d)
     assert assertion_vector2D(scal, advec)
 
 
@@ -162,11 +174,7 @@ def test_nullVelocity_list_2D():
     """
     scal, velo = setup_list_2D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17],
-                                   scal[0]: [17, 17],
-                                   scal[1]: [17, 17]},
-                      )
+    advec = Advection(velo, scal, discretization=d2d)
     assert assertion_list(scal, advec)
 
 
@@ -175,10 +183,7 @@ def test_nullVelocity_3D():
     """
     scal, velo = setup_3D()
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
-                      )
+    advec = Advection(velo, scal, discretization=d3d)
     assert assertion(scal, advec)
 
 
@@ -186,29 +191,31 @@ def test_nullVelocity_vector_3D():
     """
     """
     scal, velo = setup_vector_3D()
+    advec = Advection(velo, scal, discretization=d3d)
 
-    advec = Advection(velo, scal,
-                      resolutions={velo: [17, 17, 17],
-                                   scal: [17, 17, 17]},
-                      )
     assert assertion_vector3D(scal, advec)
 
 
-# def test_nullVelocity_list_3D():
-#     """
-#     """
-#     scal, velo = setup_list_3D()
+def test_nullVelocity_list_3D():
+    """
+    """
+    scal, velo = setup_list_3D()
+
+    advec = Advection(velo, scal, discretization=d3d)
+
+    assert assertion_list(scal, advec)
+
+
+def test_nullVelocity_dict_3D():
+    scal, velo = setup_dict_3D()
+
+    advec = Advection(velocity=velo, variables=scal, discretization=d3d)
 
-#     advec = Advection(velo, scal,
-#                       resolutions={velo: [17, 17, 17],
-#                                    scal[0]: [17, 17, 17],
-#                                    scal[1]: [17, 17, 17]},
-#                       )
-#     assert assertion_list(scal, advec)
+    assert assertion_list(scal.keys(), advec)
 
 if __name__ == '__main__':
     test_nullVelocity_2D()
     #test_nullVelocity_vector_2D()
     test_nullVelocity_list_2D()
     test_nullVelocity_3D()
     test_nullVelocity_vector_3D()
+    test_nullVelocity_list_3D()
+    test_nullVelocity_dict_3D()
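
For reviewers: a minimal sketch of the refactored Advection API exercised by this file, using only names that appear in the patch (a shared Discretization replaces the per-field resolutions dict, and setup() replaces setUp()):

```python
from parmepy import Box, Field
from parmepy.operator.advection import Advection
from parmepy.problem.simulation import Simulation
from parmepy.tools.parameters import Discretization

# One Discretization object is now shared by all advected fields.
d3d = Discretization([17, 17, 17])
box = Box(length=[2., 4., 1.], origin=[-1., -2., 0.])
scal = Field(domain=box, name='Scalar')
velo = Field(domain=box, name='Velocity',
             formula=lambda x, y, z: (0., 0., 0.), isVector=True)

advec = Advection(velo, scal, discretization=d3d)
advec.discretize()
advec.setup()
advec.apply(Simulation(tinit=0., tend=0.01, nbIter=1))
```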
diff --git a/HySoP/hysop/operator/tests/test_penalization.py b/HySoP/hysop/operator/tests/test_penalization.py
index e4ead15ded791605e03994ad9fc4547ccec71d3a..7906da7aa6f8203877200d2e215802a24581d973 100644
--- a/HySoP/hysop/operator/tests/test_penalization.py
+++ b/HySoP/hysop/operator/tests/test_penalization.py
@@ -1,190 +1,337 @@
 # -*- coding: utf-8 -*-
-import parmepy as pp
-from parmepy.domain.obstacle.disk import HalfDisk, Disk
+from parmepy.domain.subsets.sphere import HemiSphere, Sphere
+from parmepy.domain.subsets.cylinder import Cylinder
+from parmepy.domain.subsets.porous import Porous
 from parmepy.operator.penalization import Penalization
-from parmepy.fields.continuous import Field
+from parmepy.operator.penalization_and_curl import PenalizationAndCurl
 from parmepy.problem.simulation import Simulation
-from parmepy.domain.obstacle.planes import PlaneBoundaries
+from parmepy.tools.parameters import Discretization, IO_params
+from parmepy.mpi.topology import Cartesian
+import parmepy.tools.numpywrappers as npw
+import numpy as np
+import os
+from parmepy import Field, Box
+from parmepy.operator.hdf_io import HDF_Reader
 
 
-def computeVel(x, y, ):
-    return 1, 1
+def v2d(res, x, y, t):
+    res[0][...] = 1.
+    res[1][...] = 1.
+    return res
 
 
-def computeScal(x, y, t):
-    return 1
+def s2d(res, x, y, t):
+    res[0][...] = 1.
+    return res
 
 
-def testPenalScal2D():
+def v3d(res, x, y, z, t):
+    res[0][...] = 1.
+    res[1][...] = 1.
+    res[2][...] = 1.
+    return res
+
+
+def s3d(res, x, y, z, t):
+    res[0][...] = 1.
+    return res
+
+
+Nx = 128
+Ny = 96
+Nz = 102
+g = 2
+
+
+import math
+from parmepy.mpi import main_size
+
+ldef = npw.asrealarray([0.3, 0.4, 1.0])
+discr3D = Discretization([Nx + 1, Ny + 1, Nz + 1], [g - 1, g - 2, g])
+discr2D = Discretization([Nx + 1, Ny + 1], [g - 1, g])
+xdom = npw.asrealarray([0.1, -0.3, 0.5])
+ldom = npw.asrealarray([math.pi * 2., ] * 3)
+xdef = npw.asrealarray(xdom + 0.2)
+xpos = npw.asrealarray(ldom * 0.5)
+xpos[-1] += 0.1
+working_dir = os.getcwd() + '/p' + str(main_size) + '/'
+
+
+def init(discr, fileref):
+    Cartesian.reset_counter()
+    dim = len(discr.resolution)
+    dom = Box(dimension=dim, length=ldom[:dim],
+              origin=xdom[:dim])
+    topo = dom.create_topology(discr)
+    scalref = Field(domain=topo.domain, name='scalref')
+    #    scalRef.hdf_load(topo, iop, restart=0)
+    veloref = Field(domain=topo.domain, name='veloref', isVector=True)
+    # Read a reference file
+    iop = IO_params(working_dir + fileref)
+    reader = HDF_Reader(variables={scalref: topo, veloref: topo},
+                        io_params=iop, restart=0)
+    reader.discretize()
+    reader.setup()
+    reader.apply()
+    reader.finalize()
+    sdref = scalref.discretization(topo)
+    vdref = veloref.discretization(topo)
+    return topo, sdref, vdref
+
+
+def check_penal(penal, sref, vref, scal, velo):
+    penal.discretize()
+    penal.setup()
+    topo = penal.variables[scal]
+    scal.initialize(topo=topo)
+    velo.initialize(topo=topo)
+    vd = velo.discretize(topo)
+    sd = scal.discretize(topo)
+    simu = Simulation(nbIter=3)
+    penal.apply(simu)
+    ind = topo.mesh.iCompute
+    assert np.allclose(sd.data[0][ind], sref.data[0][ind])
+    for d in xrange(vd.nbComponents):
+        assert np.allclose(vd.data[d][ind], vref.data[d][ind])
+
+
+def test_penal_2D():
     """
     Penalization in 2D, obstacles = semi-cylinder (disk indeed ...)
-    and a plate, field=scalar.
-    """
-    nb = 33
-    Lx = Ly = 2
-    dom = pp.Box(dimension=2, length=[Lx, Ly], origin=[-1., -1.])
-    resol = [nb, nb]
-    scal = Field(domain=dom, name='Scalar')
-    hcyl = HalfDisk(domain=dom, position=[0., 0.], radius=0.5,
-                    porousLayers=[0.13])
-    plates = PlaneBoundaries(domain=dom, normal_dir=1, thickness=0.1)
-    penal = Penalization(variables=scal, obstacles=[hcyl, plates], coeff=[1e6, 10],
-                         resolutions={scal: resol})
-    penal.discretize()
-    penal.setUp()
-    topo = scal.discreteFields.keys()[0]
-    scd = scal.discreteFields[topo]
-    scd[0] = 128
-    penal.apply(Simulation(nbIter=100))
-    scalRef = Field(domain=dom, name='ScalarRef')
-    scalRef.discretize(topo)
-    scalRef.load('ref_scal2D_PenalHspherePlane', fieldname='ScalarRef')
-    assert scalRef.norm() == scal.norm()
-
-
-def testPenalScal3D():
-    """
-    Penalization in 3D, obstacles = hemi-sphere and plates.
-    and a plate.
-    """
-    from parmepy.domain.obstacle.sphere import HemiSphere
-
-    nb = 33
-    Lx = Ly = Lz = 2
-    dom = pp.Box(dimension=3, length=[Lx, Ly, Lz], origin=[-1., -1., -1.])
-    resol = [nb, nb, nb]
-    scal = Field(domain=dom, name='Scalar')
-    hsphere = HemiSphere(domain=dom, position=[0., 0., 0.],
-                         radius=0.5, porousLayers=[0.13])
-
-    plates = PlaneBoundaries(domain=dom, normal_dir=0, thickness=0.1)
-    penal = Penalization(variables=scal, obstacles=[hsphere, plates],
-                         coeff=[1e6, 10],
-                         resolutions={scal: resol})
-    penal.discretize()
-    penal.setUp()
-    topo = scal.discreteFields.keys()[0]
-    scd = scal.discreteFields[topo]
-    scd[0] = 128
-    penal.apply(Simulation(nbIter=100))
-    scalRef = Field(domain=dom, name='ScalarRef')
-    scd = scalRef.discretize(topo)
-    scalRef.load('ref_scal3D_PenalHspherePlane', fieldname='ScalarRef')
-    assert scalRef.norm() == scal.norm()
-
-
-def testPenalScal2D_2():
-    """
-    Penalization in 2D, obstacles = cylinder and plate.
-    Field=scalar.
-    """
-    nb = 33
-    Lx = Ly = 2
-    dom = pp.Box(dimension=2, length=[Lx, Ly], origin=[-1., -1.])
-    resol = [nb, nb]
-    scal = Field(domain=dom, name='Scalar')
-    cyl = Disk(domain=dom, position=[0., 0.], radius=0.5,
-               porousLayers=[0.13])
-    penal = Penalization(variables=scal, obstacles=[cyl], coeff=[1e6, 10],
-                         resolutions={scal: resol})
-    penal.discretize()
-    penal.setUp()
-    topo = scal.discreteFields.keys()[0]
-    scd = scal.discreteFields[topo]
-    scd[0] = 128
-    penal.apply(Simulation(nbIter=100))
-    scalRef = Field(domain=dom, name='ScalarRef')
-    scalRef.discretize(topo)
-    scalRef.load('ref_scal2D_PenalSphere', fieldname='ScalarRef')
-    assert scalRef.norm() == scal.norm()
+    fields = scalar and vector.
+    """
+    topo, sref, vref = init(discr2D, 'penal2d_sphere')
+    # Obstacles
+    rd = ldom[0] * 0.3
+    # Fields to penalize
+    scal = Field(domain=topo.domain, formula=s2d, name='Scalar')
+    velo = Field(domain=topo.domain, formula=v2d, name='Velo', isVector=True)
+    hsphere = HemiSphere(parent=topo.domain, origin=xpos[:2], radius=rd)
+    penal = Penalization(variables=[scal, velo], discretization=topo,
+                         obstacles=[hsphere], coeff=1e6)
+    hsphere.discretize(topo)
+    check_penal(penal, sref, vref, scal, velo)
 
 
-def testPenalScal3D_2():
+def test_penal_2D_multi():
     """
-    Penalization in 3D, obstacles = Sphere.
+    Penalization in 2D, for several different obstacles
     """
-    from parmepy.domain.obstacle.sphere import Sphere
+    topo, sref, vref = init(discr2D, 'penal2d_multi')
+    # Obstacles
+    rd = ldom[0] * 0.1
+    # Fields to penalize
+    scal = Field(domain=topo.domain, formula=s2d, name='Scalar')
+    velo = Field(domain=topo.domain, formula=v2d, name='Velo', isVector=True)
+    hsphere = Sphere(parent=topo.domain, origin=xpos[:2], radius=rd)
+    newpos = list(xpos)
+    newpos[1] += 1.
+    hsphere2 = HemiSphere(parent=topo.domain, origin=newpos[:2],
+                          radius=rd + 0.3)
+    ll = topo.domain.length.copy()
+    ll[1] = 0.
+    from parmepy.domain.subsets.boxes import SubBox
+    downplane = SubBox(parent=topo.domain, origin=topo.domain.origin,
+                       length=ll)
+    penal = Penalization(variables=[scal, velo], discretization=topo,
+                         obstacles=[hsphere, downplane, hsphere2], coeff=1e6)
+    check_penal(penal, sref, vref, scal, velo)
 
-    nb = 33
-    Lx = Ly = Lz = 2
-    dom = pp.Box(dimension=3, length=[Lx, Ly, Lz], origin=[-1., -1., -1.])
-    resol = [nb, nb, nb]
-    scal = Field(domain=dom, name='Scalar')
-    sphere = Sphere(domain=dom, position=[0., 0., 0.],
-                    radius=0.5, porousLayers=[0.13])
 
-    penal = Penalization(variables=scal, obstacles=[sphere], coeff=[1e6, 10],
-                         resolutions={scal: resol})
-    penal.discretize()
-    penal.setUp()
-    topo = scal.discreteFields.keys()[0]
-    scd = scal.discreteFields[topo]
-    scd[0] = 128
-    penal.apply(Simulation(nbIter=100))
-    scalRef = Field(domain=dom, name='ScalarRef')
-    scd = scalRef.discretize(topo)
-    scalRef.load('ref_scal3D_PenalSphere', fieldname='ScalarRef')
-    assert scalRef.norm() == scal.norm()
-
-
-def testPenalVec2D():
-
-    nb = 33
-    Lx = Ly = 2
-    dom = pp.Box(dimension=2, length=[Lx, Ly], origin=[-1., -1.])
-    resol = [nb, nb]
-    velo = Field(domain=dom, name='Velo', isVector=True)
-    hcyl = HalfDisk(domain=dom, position=[0., 0.], radius=0.5,
-                    porousLayers=[0.13])
-    plates = PlaneBoundaries(domain=dom, normal_dir=1, thickness=0.1)
-    penal = Penalization(variables=velo, obstacles=[hcyl, plates],
-                         coeff=[1e6, 10],
-                         resolutions={velo: resol})
-    penal.discretize()
-    penal.setUp()
-    topo = velo.discreteFields.keys()[0]
-    velod = velo.discreteFields[topo]
-    velod[0] = 128
-    velod[1] = 12
-    penal.apply(Simulation(nbIter=100))
-    veloRef = Field(domain=dom, name='VeloRef', isVector=True)
-    veloRef.discretize(topo)
-    veloRef.load('ref_velo2D_PenalHspherePlane', fieldname='VeloRef')
-    assert (veloRef.norm() == velo.norm()).all()
-
-
-def testPenalVec3D():
-    from parmepy.domain.obstacle.sphere import HemiSphere
-
-    nb = 33
-    Lx = Ly = Lz = 2
-    dom = pp.Box(dimension=3, length=[Lx, Ly, Lz], origin=[-1., -1., -1.])
-    resol = [nb, nb, nb]
-    velo = Field(domain=dom, name='Velo', isVector=True)
-    hsphere = HemiSphere(domain=dom, position=[0., 0., 0.],
-                         radius=0.5, porousLayers=[0.13])
-
-    plates = PlaneBoundaries(domain=dom, normal_dir=0, thickness=0.1)
-    penal = Penalization(variables=velo, obstacles=[hsphere, plates],
-                         coeff=[1e6, 10],
-                         resolutions={velo: resol})
+def test_penal_3D():
+    """
+    Penalization in 3D, obstacle = hemisphere,
+    fields = scalar and vector.
+    """
+    topo, sref, vref = init(discr3D, 'penal3d_sphere')
+    # Obstacles
+    rd = ldom[0] * 0.3
+    # Fields to penalize
+    scal = Field(domain=topo.domain, formula=s3d, name='Scalar')
+    velo = Field(domain=topo.domain, formula=v3d, name='Velo', isVector=True)
+    hsphere = HemiSphere(parent=topo.domain, origin=xpos, radius=rd)
+    penal = Penalization(variables=[scal, velo], discretization=topo,
+                         obstacles=[hsphere], coeff=1e6)
+    check_penal(penal, sref, vref, scal, velo)
+
+
+def test_penal_3D_multi():
+    """
+    Penalization in 3D, for several different obstacles
+    """
+    topo, sref, vref = init(discr3D, 'penal3d_multi')
+    # Obstacles
+    rd = ldom[0] * 0.1
+    # Fields to penalize
+    scal = Field(domain=topo.domain, formula=s3d, name='Scalar')
+    velo = Field(domain=topo.domain, formula=v3d, name='Velo', isVector=True)
+    hsphere = Sphere(parent=topo.domain, origin=xpos, radius=rd)
+    newpos = list(xpos)
+    newpos[1] += 1.
+    hsphere2 = HemiSphere(parent=topo.domain, origin=newpos,
+                          radius=rd + 0.3)
+    ll = topo.domain.length.copy()
+    ll[1] = 0.
+    from parmepy.domain.subsets.boxes import SubBox
+    downplane = SubBox(parent=topo.domain, origin=topo.domain.origin,
+                       length=ll)
+    penal = Penalization(variables=[scal, velo], discretization=topo,
+                         obstacles=[hsphere, hsphere2, downplane], coeff=1e6)
+    check_penal(penal, sref, vref, scal, velo)
+
+
+def test_penal_3D_porous():
+    """
+    Penalization in 3D, with a porous sphere obstacle
+    """
+    topo, sref, vref = init(discr3D, 'penal3d_porous')
+    # Fields to penalize
+    scal = Field(domain=topo.domain, formula=s3d, name='Scalar')
+    velo = Field(domain=topo.domain, formula=v3d, name='Velo', isVector=True)
+    newpos = list(xpos)
+    newpos[1] += 1.
+    psphere = Porous(parent=topo.domain, origin=newpos,
+                     source=Sphere, layers=[0.5, 0.7, 0.3])
+    ll = topo.domain.length.copy()
+    ll[1] = 0.
+    from parmepy.domain.subsets.boxes import SubBox
+    downplane = SubBox(parent=topo.domain, origin=topo.domain.origin,
+                       length=ll)
+    penal = Penalization(variables=[scal, velo], discretization=topo,
+                         obstacles={psphere: [1e6, 1e2, 1e1], downplane: 1e7})
+    check_penal(penal, sref, vref, scal, velo)
+
+
+def test_penal_3D_porous_cyl():
+    """
+    Penalization in 3D, with a porous cylinder obstacle
+    """
+    topo, sref, vref = init(discr3D, 'penal3d_porous_cyl')
+    # Fields to penalize
+    scal = Field(domain=topo.domain, formula=s3d, name='Scalar')
+    velo = Field(domain=topo.domain, formula=v3d, name='Velo', isVector=True)
+    newpos = list(xpos)
+    newpos[1] += 1.
+    pcyl = Porous(parent=topo.domain, origin=newpos,
+                  source=Cylinder, layers=[0.5, 0.7, 0.3])
+    ll = topo.domain.length.copy()
+    ll[1] = 0.
+    from parmepy.domain.subsets.boxes import SubBox
+    downplane = SubBox(parent=topo.domain, origin=topo.domain.origin,
+                       length=ll)
+    penal = Penalization(variables=[scal, velo], discretization=topo,
+                         obstacles={pcyl: [1e6, 0.1, 1e6], downplane: 1e7})
+    check_penal(penal, sref, vref, scal, velo)
+
+
+def test_penal_2D_porous():
+    """
+    Penalization in 2D, with a porous sphere obstacle
+    """
+    topo, sref, vref = init(discr2D, 'penal2d_porous')
+    # Fields to penalize
+    scal = Field(domain=topo.domain, formula=s2d, name='Scalar')
+    velo = Field(domain=topo.domain, formula=v2d, name='Velo', isVector=True)
+    newpos = list(xpos)
+    newpos[1] += 1.
+    psphere = Porous(parent=topo.domain, origin=newpos[:2],
+                     source=Sphere, layers=[0.5, 0.7, 0.3])
+    ll = topo.domain.length.copy()
+    ll[1] = 0.
+    from parmepy.domain.subsets.boxes import SubBox
+    downplane = SubBox(parent=topo.domain, origin=topo.domain.origin,
+                       length=ll)
+    penal = Penalization(variables=[scal, velo], discretization=topo,
+                         obstacles={psphere: [1e6, 1e2, 1e1], downplane: 1e7})
+    check_penal(penal, sref, vref, scal, velo)
+
+
+def init_vort(discr, fileref):
+    Cartesian.reset_counter()
+    dim = len(discr.resolution)
+    dom = Box(dimension=dim, length=ldom[:dim],
+              origin=xdom[:dim])
+    topo = dom.create_topology(discr)
+    wref = Field(domain=topo.domain, name='vortiref', isVector=dim == 3)
+    #    scalRef.hdf_load(topo, iop, restart=0)
+    veloref = Field(domain=topo.domain, name='veloref', isVector=True)
+    # Read a reference file
+    iop = IO_params(working_dir + fileref)
+    reader = HDF_Reader(variables={wref: topo, veloref: topo},
+                        io_params=iop, restart=0)
+    reader.discretize()
+    reader.setup()
+    reader.apply()
+    reader.finalize()
+    wdref = wref.discretize(topo)
+    vdref = veloref.discretize(topo)
+    return topo, wdref, vdref
+
+
+def check_penal_vort(penal, wref, vref, vorti, velo):
     penal.discretize()
-    penal.setUp()
-    topo = velo.discreteFields.keys()[0]
-    velod = velo.discreteFields[topo]
-    velod[0] = 128
-    velod[1] = 12
-    velod[2] = 4.3
-    penal.apply(Simulation(nbIter=100))
-    veloRef = Field(domain=dom, name='VeloRef', isVector=True)
-    veloRef.discretize(topo)
-    veloRef.load('ref_velo3D_PenalHspherePlane', fieldname='VeloRef')
-    assert (veloRef.norm() == velo.norm()).all()
+    penal.setup()
+    topo = penal.variables[vorti]
+    vorti.initialize(topo=topo)
+    velo.initialize(topo=topo)
+    vd = velo.discretize(topo)
+    wd = vorti.discretize(topo)
+    simu = Simulation(nbIter=3)
+    penal.apply(simu)
+    ind = topo.mesh.iCompute
+    for d in xrange(vd.nbComponents):
+        err = np.abs(vd.data[d][ind] - vref.data[d][ind])
+        print np.max(err), np.max(err / np.abs(vd.data[d][ind]))
+        assert np.allclose(vd.data[d][ind], vref.data[d][ind])
+    for d in xrange(wd.nbComponents):
+        err = np.abs(wd.data[d][ind] - wref.data[d][ind])
+        print np.max(err), np.max(err / np.abs(wd.data[d][ind]))
+        assert np.allclose(wd.data[d][ind], wref.data[d][ind])
+
+
+def test_penal_vort_2D():
+    """
+    Penalization + Curl in 2D, obstacle = half-disk,
+    fields = vorticity (scalar) and velocity (vector).
+    """
+    d2D = Discretization([Nx + 1, Ny + 1], [g, g])
+    topo, wref, vref = init_vort(d2D, 'penal_vort_2d_sphere')
+    # Obstacles
+    rd = ldom[0] * 0.3
+    # Fields to penalize
+    vorti = Field(domain=topo.domain, formula=s2d, name='Vorti')
+    velo = Field(domain=topo.domain, formula=v2d, name='Velo', isVector=True)
+    hsphere = HemiSphere(parent=topo.domain, origin=xpos[:2], radius=rd)
+    penal = PenalizationAndCurl(velocity=velo, vorticity=vorti,
+                                discretization=topo,
+                                obstacles=[hsphere], coeff=1e8)
+    hsphere.discretize(topo)
+    check_penal_vort(penal, wref, vref, vorti, velo)
+
+
+def test_penal_vort_3D():
+    """
+    Penalization + Curl in 3D, obstacle = hemisphere,
+    fields = vorticity and velocity (both vectors).
+    """
+    d3D = Discretization([Nx + 1, Ny + 1, Nz + 1], [g, g, g])
+    topo, wref, vref = init_vort(d3D, 'penal_vort_3d_sphere')
+    # Obstacles
+    rd = ldom[0] * 0.3
+    # Fields to penalize
+    vorti = Field(domain=topo.domain, formula=v3d, name='Vorti', isVector=True)
+    velo = Field(domain=topo.domain, formula=v3d, name='Velo', isVector=True)
+    hsphere = HemiSphere(parent=topo.domain, origin=xpos, radius=rd)
+    penal = PenalizationAndCurl(velocity=velo, vorticity=vorti,
+                                discretization=topo,
+                                obstacles=[hsphere], coeff=1e8)
+    check_penal_vort(penal, wref, vref, vorti, velo)
+
 
 if __name__ == "__main__":
-    testPenalScal2D()
-    ## testPenalScal3D()
-    ## testPenalScal2D_2()
-    ## testPenalScal3D_2()
-    ## testPenalVec2D()
-    ## testPenalVec3D()
+    test_penal_2D()
+    test_penal_3D()
+    test_penal_2D_multi()
+    test_penal_3D_multi()
+    test_penal_2D_porous()
+    test_penal_3D_porous()
+    test_penal_3D_porous_cyl()
+    test_penal_vort_3D()
+    test_penal_vort_2D()
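
A sketch of the per-obstacle coefficient convention introduced above: a Porous subset appears to take one penalization coefficient per layer, while a plain subset takes a single scalar. Names (topo, scal, velo, xpos) are borrowed from the tests in this file; the layer-to-coefficient pairing order is an assumption:

```python
from parmepy.domain.subsets.porous import Porous
from parmepy.domain.subsets.sphere import Sphere
from parmepy.operator.penalization import Penalization

# A sphere split into three layers of thickness 0.5, 0.7 and 0.3 ...
psphere = Porous(parent=topo.domain, origin=xpos,
                 source=Sphere, layers=[0.5, 0.7, 0.3])
# ... penalized with one coefficient per layer.
penal = Penalization(variables=[scal, velo], discretization=topo,
                     obstacles={psphere: [1e6, 1e2, 1e1]})
penal.discretize()
penal.setup()
```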
diff --git a/HySoP/hysop/operator/tests/test_poisson.py b/HySoP/hysop/operator/tests/test_poisson.py
index eb95b4697b902c951599d95b275456b51b855588..72ab7bdfdd6b71dddfab7fc25d1a154feb49a012 100755
--- a/HySoP/hysop/operator/tests/test_poisson.py
+++ b/HySoP/hysop/operator/tests/test_poisson.py
@@ -3,8 +3,11 @@
 import parmepy as pp
 from parmepy.operator.poisson import Poisson
 from parmepy.operator.analytic import Analytic
+from parmepy.operator.reprojection import Reprojection
 from parmepy.problem.simulation import Simulation
+from parmepy.tools.parameters import Discretization
 import numpy as np
+import parmepy.tools.numpywrappers as npw
 import math
 pi = math.pi
 sin = np.sin
@@ -12,12 +15,15 @@ cos = np.cos
 
 ## Physical Domain description
 dim = 3
-LL = 2 * pi * np.ones((dim))
+LL = 2 * pi * npw.ones((dim))
 # formula to compute initial vorticity field
 coeff = 4 * pi ** 2 * (LL[1] ** 2 * LL[2] ** 2 + LL[0] ** 2 * LL[2] ** 2 +
                        LL[0] ** 2 * LL[1] ** 2) / (LL[0] ** 2 * LL[1] ** 2
                                                    * LL[2] ** 2)
 cc = 2 * pi / LL
+d3D = Discretization([33, 257, 257])
+d2D = Discretization([33, 33])
+uinf = 1.0
 
 
 def computeVort(res, x, y, z, t):
@@ -44,90 +50,218 @@ def computeRef(res, x, y, z, t):
     return res
 
 
+# ref. field
+def computeRef_with_correction(res, x, y, z, t):
+    res[0][...] = -2. * pi / LL[1] * \
+        (cos(x * cc[0]) * sin(y * cc[1]) * sin(z * cc[2])) \
+        - 2. * pi / LL[2] * (cos(x * cc[0]) * sin(y * cc[1]) * cos(z * cc[2]))\
+        + uinf
+
+    res[1][...] = -2. * pi / LL[2] * \
+        (sin(x * cc[0]) * sin(y * cc[1]) * sin(z * cc[2])) \
+        + 2. * pi / LL[0] * (sin(x * cc[0]) * cos(y * cc[1]) * sin(z * cc[2]))
+
+    res[2][...] = -2. * pi / LL[0] * \
+        (sin(x * cc[0]) * sin(y * cc[1]) * sin(z * cc[2])) \
+        - 2. * pi / LL[1] * (sin(x * cc[0]) * cos(y * cc[1]) * cos(z * cc[2]))
+
+    return res
+
+
 def computeVort2D(res, x, y, t):
     # todo ...
-    res[0][...] = coeff * sin(x * cc[0]) * sin(y * cc[1])
-    res[1][...] = coeff * cos(x * cc[0]) * sin(y * cc[1])
+    res[0][...] = 4 * pi ** 2 * (cos(x * cc[0]) * sin(y * cc[1])) * \
+        (1. / LL[0] ** 2 + 1. / LL[1] ** 2)
     return res
 
 
 # ref. field
 def computeRef2D(res, x, y, t):
-    # todo : find a proper ref
-    res[0][...] = 1.
-    res[1][...] = 1.
+    res[0][...] = 2. * pi / LL[1] * (cos(x * cc[0]) * cos(y * cc[1]))
+    res[1][...] = 2. * pi / LL[0] * (sin(x * cc[0]) * sin(y * cc[1]))
+
     return res
 
 
 def test_Poisson3D():
-    dom = pp.Box(dimension=dim, length=LL)
+    dom = pp.Box(length=LL)
 
     # Fields
     velocity = pp.Field(domain=dom, isVector=True, name='Velocity')
     vorticity = pp.Field(domain=dom, formula=computeVort,
                          name='Vorticity', isVector=True)
 
-    resol = [33, 33, 33]
     # Definition of the Poisson operator
-    poisson = Poisson(velocity, vorticity,
-                      resolutions={velocity: resol, vorticity: resol})
+    poisson = Poisson(velocity, vorticity, discretization=d3D)
+
+    poisson.discretize()
+    poisson.setup()
+    topo = poisson.discreteFields[vorticity].topology
+    # Analytic operator to compute the reference field
+    ref = pp.Field(domain=dom, name='reference', isVector=True)
+    refOp = Analytic(variables={ref: topo}, formula=computeRef)
+    simu = Simulation(nbIter=10)
+    refOp.discretize()
+    refOp.setup()
+    vorticity.initialize(topo=topo)
+    poisson.apply(simu)
+    refOp.apply(simu)
+    assert np.allclose(ref.norm(topo), velocity.norm(topo))
+    refD = ref.discretization(topo)
+    vd = velocity.discretization(topo)
+    for i in range(dom.dimension):
+        assert np.allclose(vd[i], refD[i])
+    poisson.finalize()
+
+
+def test_Poisson2D():
+    dom = pp.Box(length=[2. * pi, 2. * pi], origin=[0., 0.])
+
+    # Fields
+    velocity = pp.Field(domain=dom, isVector=True, name='Velocity')
+    vorticity = pp.Field(domain=dom, formula=computeVort2D, name='Vorticity')
+
+    # Definition of the Poisson operator
+    poisson = Poisson(velocity, vorticity, discretization=d2D)
 
     poisson.discretize()
-    poisson.setUp()
+    poisson.setup()
     topo = poisson.discreteFields[vorticity].topology
     # Analytic operator to compute the reference field
     ref = pp.Field(domain=dom, name='reference', isVector=True)
-    refOp = Analytic(variables=ref, formula=computeRef,
-                     topo=topo)
+    refOp = Analytic(variables={ref: topo}, formula=computeRef2D)
     simu = Simulation(nbIter=10)
     refOp.discretize()
-    refOp.setUp()
+    refOp.setup()
     vorticity.initialize(topo=topo)
     poisson.apply(simu)
     refOp.apply(simu)
-    assert np.allclose(ref.norm(), velocity.norm())
+
+    assert np.allclose(ref.norm(topo), velocity.norm(topo))
     refD = ref.discretization(topo)
     vd = velocity.discretization(topo)
 
-    assert np.allclose(ref.norm(), velocity.norm())
     for i in range(dom.dimension):
         assert np.allclose(vd[i], refD[i])
     poisson.finalize()
 
 
-## def test_Poisson2D():
-##     dom = pp.Box(dimension=2, length=[2. * pi, 2. * pi], origin=[0., 0.])
+def test_Poisson3D_correction():
+    dom = pp.Box(length=LL)
 
-##     # Fields
-##     velocity = pp.Field(domain=dom, isVector=True, name='Velocity')
-##     vorticity = pp.Field(domain=dom, formula=computeVort2D,
-##                          name='Vorticity', isVector=True)
+    # Fields
+    velocity = pp.Field(domain=dom, isVector=True, name='Velocity')
+    vorticity = pp.Field(domain=dom, formula=computeVort,
+                         name='Vorticity', isVector=True)
 
-##     resol = [33, 33]
-##     # Definition of the Poisson operator
-##     poisson = Poisson(velocity, vorticity,
-##                       resolutions={velocity: resol, vorticity: resol})
+    # Definition of the Poisson operator
+    ref_rate = npw.zeros(3)
+    ref_rate[0] = uinf * LL[1] * LL[2]
+    rate = pp.VariableParameter(data=ref_rate, name='flowrate')
+    poisson = Poisson(velocity, vorticity, discretization=d3D, flowrate=rate)
 
-##     # Analytic operator to compute the reference field
-##     ref = pp.Field(domain=dom, name='reference', isVector=True)
-##     refOp = Analytic(ref, formula=computeRef2D,
-##                      resolutions={ref: resol})
+    poisson.discretize()
+    poisson.setup()
+    topo = poisson.discreteFields[vorticity].topology
+    # Analytic operator to compute the reference field
+    ref = pp.Field(domain=dom, name='reference', isVector=True)
+    refOp = Analytic(variables={ref: topo}, formula=computeRef_with_correction)
+    simu = Simulation(nbIter=10)
+    refOp.discretize()
+    refOp.setup()
+    vorticity.initialize(topo=topo)
 
-##     simu = Simulation(nbIter=10)
-##     refOp.setUp()
-##     poisson.setUp()
-##     refOp.apply(simu)
-##     vorticity.initialize()
-##     poisson.apply(simu)
-## #    assert np.allclose(ref.norm(), velocity.norm())
-##     poisson.finalize()
-##  #   refD = ref.discreteFields.values()[0]
-##  #   vd = velocity.discreteFields.values()[0]
+    poisson.apply(simu)
+    refOp.apply(simu)
+    refD = ref.discretization(topo)
+    vd = velocity.discretization(topo)
+    from parmepy.domain.subsets.boxes import SubBox
+    surf = SubBox(parent=dom, origin=dom.origin,
+                  length=[0., LL[1], LL[2]])
+    surf.discretize(topo)
+    assert np.allclose(ref.norm(topo), velocity.norm(topo))
+    for i in range(dom.dimension):
+        assert np.allclose(vd[i], refD[i])
+    poisson.finalize()
+
+
+def test_Poisson3D_projection_1():
+    dom = pp.Box(length=LL)
+
+    # Fields
+    velocity = pp.Field(domain=dom, isVector=True, name='Velocity')
+    vorticity = pp.Field(domain=dom, formula=computeVort,
+                         name='Vorticity', isVector=True)
 
-## #    assert np.allclose(ref.norm(), velocity.norm())
-## #    for i in range(dom.dimension):
-## #        assert np.allclose(vd[i], refD[i])
+    # Definition of the Poisson operator
+    poisson = Poisson(velocity, vorticity, discretization=d3D, projection=4)
+
+    poisson.discretize()
+    poisson.setup()
+    topo = poisson.discreteFields[vorticity].topology
+    # Analytic operator to compute the reference field
+    ref = pp.Field(domain=dom, name='reference', isVector=True)
+    refOp = Analytic(variables={ref: topo}, formula=computeRef)
+    simu = Simulation(nbIter=10)
+    refOp.discretize()
+    refOp.setup()
+    vorticity.initialize(topo=topo)
+    poisson.apply(simu)
+    refOp.apply(simu)
+    assert np.allclose(ref.norm(topo), velocity.norm(topo))
+    refD = ref.discretization(topo)
+    vd = velocity.discretization(topo)
+
+    for i in range(dom.dimension):
+        assert np.allclose(vd[i], refD[i])
+
+    poisson.finalize()
+
+
+def test_Poisson3D_projection_2():
+    dom = pp.Box(length=LL)
+
+    # Fields
+    velocity = pp.Field(domain=dom, isVector=True, name='Velocity')
+    vorticity = pp.Field(domain=dom, formula=computeVort,
+                         name='Vorticity', isVector=True)
+    d3dG = Discretization([33, 33, 33], [2, 2, 2])
+    # Definition of the Poisson operator
+    proj = Reprojection(vorticity, threshold=0.05, frequency=4,
+                        discretization=d3dG, io_params=True)
+
+    poisson = Poisson(velocity, vorticity, discretization=d3D,
+                      projection=proj)
+    proj.discretize()
+    poisson.discretize()
+    poisson.setup()
+    proj.setup()
+    topo = poisson.discreteFields[vorticity].topology
+    # Analytic operator to compute the reference field
+    ref = pp.Field(domain=dom, name='reference', isVector=True)
+    refOp = Analytic(variables={ref: topo}, formula=computeRef)
+    simu = Simulation(nbIter=10)
+    refOp.discretize()
+    refOp.setup()
+    vorticity.initialize(topo=topo)
+    poisson.apply(simu)
+    refOp.apply(simu)
+    assert np.allclose(ref.norm(topo), velocity.norm(topo))
+    refD = ref.discretization(topo)
+    vd = velocity.discretization(topo)
+
+    for i in range(dom.dimension):
+        assert np.allclose(vd[i], refD[i])
+    poisson.finalize()
 
 # This may be useful to run mpi tests
 if __name__ == "__main__":
     test_Poisson3D()
+    test_Poisson2D()
+    test_Poisson3D_correction()
+    test_Poisson3D_projection_1()
+    test_Poisson3D_projection_2()
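
A condensed sketch of the new flowrate-corrected Poisson setup tested above (LL, computeVort, uinf and d3D are the module-level definitions from this file):

```python
import parmepy as pp
import parmepy.tools.numpywrappers as npw
from parmepy.operator.poisson import Poisson

dom = pp.Box(length=LL)
velocity = pp.Field(domain=dom, isVector=True, name='Velocity')
vorticity = pp.Field(domain=dom, formula=computeVort,
                     name='Vorticity', isVector=True)

# Required flowrate through the x-normal inlet surface.
ref_rate = npw.zeros(3)
ref_rate[0] = uinf * LL[1] * LL[2]
rate = pp.VariableParameter(data=ref_rate, name='flowrate')

poisson = Poisson(velocity, vorticity, discretization=d3D, flowrate=rate)
poisson.discretize()
poisson.setup()
```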
diff --git a/HySoP/hysop/operator/tests/test_redistribute.py b/HySoP/hysop/operator/tests/test_redistribute.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8fa8c75a94a259ee60a6108764bb1a260466966
--- /dev/null
+++ b/HySoP/hysop/operator/tests/test_redistribute.py
@@ -0,0 +1,85 @@
+from parmepy.operator.redistribute_intra import RedistributeIntra
+from parmepy.mpi.topology import Cartesian
+from parmepy.mpi.main_var import main_size, main_rank, main_comm
+import parmepy as pp
+import numpy as np
+
+
+# Func to initialize fields
+def func_vec_1(res, x, y, z, t):
+    res[0][...] = x
+    res[1][...] = 0.1 * y
+    res[2][...] = 10. * z * z
+    return res
+
+
+def init_context():
+    # MPI procs are distributed among two tasks
+    dim = 3
+    nb = 32
+    ## Domain
+    box = pp.Box(length=[1., 1., 1.], origin=[0., 0., 0.])
+
+    ## Global resolution
+    nbElem = [nb] * dim
+
+    # Setup for MPI tasks
+    GPU = 4
+    CPU = 1
+    proc_tasks = [CPU,] * main_size
+    if main_size > 1:
+        proc_tasks[-1] = GPU
+    comm_s = main_comm.Split(color=proc_tasks[main_rank], key=main_rank)
+
+    isGPU = proc_tasks[main_rank] == GPU
+    isCPU = proc_tasks[main_rank] == CPU
+    
+    ghosts = np.ones((box.dimension)) * 2
+
+    if isCPU:
+        topo = Cartesian(box, box.dimension, nbElem, ghosts=ghosts,
+                         comm=comm_s, task_id=CPU)
+        topo2 = Cartesian(box, 2, nbElem, comm=comm_s, task_id=CPU)
+    
+    elif isGPU:
+        topo = None
+        topo2 = None
+        
+    from parmepy import Field
+    velo = Field(domain=box, name='Velocity',
+                 formula=func_vec_1, isVector=True)
+    if isCPU:
+        velo.discretize(topo)
+        velo.discretize(topo2)
+        velo.initialize(topo=topo)
+        
+    return velo, topo, topo2, isCPU
+
+
+def test_distribute_intra_1():
+    velo, topo, topo2, isCPU = init_context()
+    br = RedistributeIntra(variables=[velo], source=topo, target=topo2)
+    if isCPU:
+        br.setup()
+        br.apply()
+        br.wait()
+
+    if isCPU:
+        n1 = velo.norm(topo)
+        n2 = velo.norm(topo2)
+        assert np.allclose(n1, n2)
+
+
+def test_distribute_intra_2():
+    velo, topo, topo2, isCPU = init_context()
+    br = RedistributeIntra(variables={velo: (topo, topo2)})
+    if isCPU:
+        br.setup()
+        br.apply()
+        br.wait()
+
+    if isCPU:
+        n1 = velo.norm(topo)
+        n2 = velo.norm(topo2)
+        assert np.allclose(n1, n2)
+
+# This may be useful to run mpi tests
+if __name__ == "__main__":
+    test_distribute_intra_1()
+    test_distribute_intra_2()
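
The two constructor forms above are equivalent for a single field; a sketch, reusing velo, topo and topo2 as built by init_context():

```python
from parmepy.operator.redistribute_intra import RedistributeIntra

# Form 1: one (source, target) pair applied to every variable.
br = RedistributeIntra(variables=[velo], source=topo, target=topo2)
# Form 2: an explicit (source, target) pair per variable.
br = RedistributeIntra(variables={velo: (topo, topo2)})

br.setup()
br.apply()
# wait() is called before reading the target in the tests, which suggests
# apply() may post non-blocking MPI exchanges.
br.wait()
```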
diff --git a/HySoP/hysop/operator/tests/test_reprojection.py b/HySoP/hysop/operator/tests/test_reprojection.py
new file mode 100644
index 0000000000000000000000000000000000000000..e752f163b211dfbbd40e027c43dfb062b0ee5ea9
--- /dev/null
+++ b/HySoP/hysop/operator/tests/test_reprojection.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+import parmepy as pp
+from parmepy.operator.reprojection import Reprojection
+from parmepy.problem.simulation import Simulation
+import numpy as np
+from parmepy.mpi.topology import Cartesian
+import math
+import parmepy.tools.numpywrappers as npw
+from parmepy.tools.parameters import Discretization
+pi = np.pi
+cos = np.cos
+sin = np.sin
+# Upstream flow velocity
+uinf = 1.0
+tol = 1e-12
+
+
+## Function to compute TG velocity
+def computeVel(res, x, y, z, t):
+    res[0][...] = sin(x) * cos(y) * cos(z)
+    res[1][...] = - cos(x) * sin(y) * cos(z)
+    res[2][...] = 0.
+    return res
+
+
+## Function to compute reference vorticity
+def computeVort(res, x, y, z, t):
+    res[0][...] = - cos(x) * sin(y) * sin(z)
+    res[1][...] = - sin(x) * cos(y) * sin(z)
+    res[2][...] = 2. * sin(x) * sin(y) * cos(z)
+    return res
+
+## Global resolution
+d3D = Discretization([33, 33, 33], [2, 2, 2])
+
+
+def test_reprojection():
+    # Domain
+    box = pp.Box(length=[2.0 * pi, pi, pi])
+    # Vector Fields
+    vorti = pp.Field(domain=box, formula=computeVort,
+                     name='Vorticity', isVector=True)
+
+    # Usual Cartesian topology definition
+    topo = Cartesian(box, dim=box.dimension, discretization=d3D)
+
+    op = Reprojection(vorti, threshold=0.05, frequency=4,
+                      discretization=topo, io_params=True)
+    op.discretize()
+    op.setup()
+    # === Simulation setup ===
+    simu = Simulation(nbIter=8)
+    # init fields
+    vorti.initialize(topo=topo)
+    # Apply correction
+    simu.initialize()
+    while not simu.isOver:
+        op.apply(simu)
+        simu.advance()
diff --git a/HySoP/hysop/operator/tests/test_velocity_correction.py b/HySoP/hysop/operator/tests/test_velocity_correction.py
index 018d3f66e84db43293e7c0887e803c330ec57e85..74c97c313917fa31102a045310b135eec0818729 100755
--- a/HySoP/hysop/operator/tests/test_velocity_correction.py
+++ b/HySoP/hysop/operator/tests/test_velocity_correction.py
@@ -4,17 +4,8 @@ import parmepy as pp
 from parmepy.operator.velocity_correction import VelocityCorrection
 from parmepy.problem.simulation import Simulation
 import numpy as np
-from parmepy.mpi.topology import Cartesian
-import math
-pi = math.pi
-sin = np.sin
-cos = np.cos
 import parmepy.tools.numpywrappers as npw
-
-## Physical Domain description
-print (" ========= Start Navier-Stokes 3D (Flow past bluff bodies) =========")
-
-## pi constant
+from parmepy.tools.parameters import Discretization
 pi = np.pi
 cos = np.cos
 sin = np.sin
@@ -51,63 +42,47 @@ def computeVort2D(res, x, y, t):
     res[0][...] = - cos(x) * sin(y)
     return res
 
+## Global resolutions, with g ghost points in each direction
+g = 0
+d2D = Discretization([33, 33], [g, g])
+d3D = Discretization([33, 33, 33], [g, g, g])
 
-def test_velocity_correction_3D():
-    dim = 3
-    ## Domain
-    boxlength = npw.realarray([2.0 * pi, pi, pi])
-    boxorigin = npw.realarray([0., 0., 0.])
-    box = pp.Box(dim, length=boxlength, origin=boxorigin)
-
-    ## Global resolution
-    nbElem = [33, 33, 33]
 
-    ## Vector Fields
+def test_velocity_correction_3D():
+    # Domain
+    box = pp.Box(length=[2.0 * pi, pi, pi])
+    # Vector Fields
     velo = pp.Field(domain=box, formula=computeVel,
                     name='Velocity', isVector=True)
     vorti = pp.Field(domain=box, formula=computeVort,
                      name='Vorticity', isVector=True)
 
-    ## Usual Cartesian topology definition
-    NBGHOSTS = 2
-    ghosts = np.ones((box.dimension)) * NBGHOSTS
-    topo = Cartesian(box, box.dimension, nbElem,
-                     ghosts=ghosts)
+    # Usual Cartesian topology definition
+    topo = box.create_topology(discretization=d3D)
 
-    op = {}
     ref_rate = npw.zeros(3)
     ref_rate[0] = uinf * box.length[1] * box.length[2]
     rate = pp.VariableParameter(data=ref_rate, name='flowrate')
-    op['correction'] = VelocityCorrection(velo, vorti, req_flowrate=rate,
-                                          topo=topo)
-    op['correction'].discretize()
-    op['correction'].setUp()
+    op = VelocityCorrection(velo, vorti, req_flowrate=rate,
+                            discretization=topo, io_params={})
+    op.discretize()
+    op.setup()
     # === Simulation setup ===
     simu = Simulation(tinit=0.0, tend=5., timeStep=0.005, iterMax=1000000)
     # init fields
     velo.initialize(topo=topo)
     vorti.initialize(topo=topo)
-
     # Apply correction
-    op['correction'].apply(simu)
+    op.apply(simu)
     # check new flowrate values
-    sref = op['correction'].discreteOperator.surfRef
-    flowrate = velo.integrateOnSurface(sref, topo)
-    assert (flowrate - ref_rate[0]) < tol
-    for i in xrange(1, dim):
-        flowrate2 = velo.integrateOnSurface(sref, topo, component=i)
-        assert flowrate2 < tol
+    sref = op.cb.surf[0]
+    flowrate = sref.integrate_field_allc(velo, topo)
+    assert (np.abs(flowrate - ref_rate) < tol).all()
 
 
 def test_velocity_correction_2D():
-    dim = 2
     ## Domain
-    boxlength = npw.realarray([2.0 * pi, pi])
-    boxorigin = npw.realarray([0., 0.])
-    box = pp.Box(dim, length=boxlength, origin=boxorigin)
-
-    ## Global resolution
-    nbElem = [33, 33]
+    box = pp.Box(length=[2.0 * pi, pi], origin=[0., 0.])
 
     ## Vector Fields
     velo = pp.Field(domain=box, formula=computeVel2D,
@@ -116,19 +91,15 @@ def test_velocity_correction_2D():
                      name='Vorticity', isVector=False)
 
     ## Usual Cartesian topology definition
-    NBGHOSTS = 2
-    ghosts = np.ones((box.dimension)) * NBGHOSTS
-    topo = Cartesian(box, box.dimension, nbElem,
-                     ghosts=ghosts)
+    topo = box.create_topology(discretization=d2D)
 
-    op = {}
     ref_rate = npw.zeros(2)
     ref_rate[0] = uinf * box.length[1]
     rate = pp.VariableParameter(data=ref_rate, name='flowrate')
-    op['correction'] = VelocityCorrection(velo, vorti,
-                                          req_flowrate=rate, topo=topo)
-    op['correction'].discretize()
-    op['correction'].setUp()
+    op = VelocityCorrection(velo, vorti, req_flowrate=rate,
+                            discretization=topo)
+    op.discretize()
+    op.setup()
     # === Simulation setup ===
     simu = Simulation(tinit=0.0, tend=5., timeStep=0.005, iterMax=1000000)
     # init fields
@@ -136,14 +107,11 @@ def test_velocity_correction_2D():
     vorti.initialize(topo=topo)
 
     # Apply correction
-    op['correction'].apply(simu)
+    op.apply(simu)
     # check new flowrate values
-    sref = op['correction'].discreteOperator.surfRef
-    flowrate = velo.integrateOnSurface(sref, topo)
-    assert (flowrate - ref_rate[0]) < tol
-    for i in xrange(1, dim):
-        flowrate2 = velo.integrateOnSurface(sref, topo, component=i)
-        assert flowrate2 < tol
+    sref = op.cb.surf[0]
+    flowrate = sref.integrate_field_allc(velo, topo)
+    assert (np.abs(flowrate - ref_rate) < tol).all()
 
 # This may be useful to run mpi tests
 if __name__ == "__main__":
diff --git a/HySoP/hysop/operator/velocity_correction.py b/HySoP/hysop/operator/velocity_correction.py
index fc663a24775a4ce61fe830d8c49fdffec939ba32..3485d724b64928c7e68d976ad2e326fad4344a5f 100755
--- a/HySoP/hysop/operator/velocity_correction.py
+++ b/HySoP/hysop/operator/velocity_correction.py
@@ -7,11 +7,12 @@ Operator to shift velocity to fit with a required input flowrate.
 """
 from parmepy.constants import debug
 from parmepy.operator.discrete.velocity_correction import VelocityCorrection_D
-from parmepy.operator.continuous import Operator
-from parmepy.domain.obstacle.controlBox import ControlBox
+from parmepy.operator.computational import Computational
+from parmepy.domain.subsets.control_box import ControlBox
+from parmepy.operator.continuous import opsetup
 
 
-class VelocityCorrection(Operator):
+class VelocityCorrection(Computational):
     """
     The velocity field is corrected after solving the
     Poisson equation. For more details about calculations,
@@ -20,7 +21,7 @@ class VelocityCorrection(Operator):
     """
 
     @debug
-    def __init__(self, velocity, vorticity, req_flowrate, surf=None, **kwds):
+    def __init__(self, velocity, vorticity, req_flowrate, **kwds):
         """
         Corrects the values of the velocity field after
         solving Poisson equation in order to prescribe proper
@@ -30,11 +31,9 @@ class VelocityCorrection(Operator):
         @param[in] vorticity field used to compute correction
         @param resolutions : grid resolutions of velocity and vorticity
         @param[in] req_flowrate : required value for the flowrate
-        @param[in] surf : surface (parmepy.domain.obstacle.planes.SubPlane)
-        used to compute reference flow rates. Default = surface at x_origin,
-        normal to x-dir.
         @param topo : a predefined topology to discretize velocity/vorticity
         """
+        assert 'variables' not in kwds, \
+            "'variables' is set internally (velocity, vorticity)."
         super(VelocityCorrection, self).__init__(variables=[velocity,
                                                             vorticity], **kwds)
         ## velocity variable (vector)
@@ -44,36 +43,36 @@ class VelocityCorrection(Operator):
 
         self.input = [self.velocity, self.vorticity]
         self.output = [self.velocity]
-        ## A 'reference' surface on which flow rates will be computed
-        ## (usually, surface normal to the flow at origin)
-        self.surfRef = surf
-        ## Expected value for the flow rate through self.surfRef
+        ## Expected value for the flow rate through input surface
         self.req_flowrate = req_flowrate
         dom = self.velocity.domain
-        self.cb = ControlBox(domain=dom, origin=dom.origin, lengths=dom.length)
+        self.cb = ControlBox(origin=dom.origin, length=dom.length,
+                             parent=dom)
+        ## Extra parameters that may be required by the discrete operator
+        ## (currently only io_params)
+        self.config = kwds
 
     def discretize(self):
         super(VelocityCorrection, self)._standard_discretize()
+        assert self._single_topo, 'Multi-resolution case is not allowed.'
 
     @debug
-    def setUp(self):
-        self.discreteOperator =\
-            VelocityCorrection_D(self.discreteFields[self.velocity],
-                                 self.discreteFields[self.vorticity],
-                                 self.req_flowrate, self.cb)
-
-        self.discreteOperator.setUp()
-        self._isUpToDate = True
-
-    def apply(self, simulation=None):
-        """
-        Compute correction and add it to current velocoty
-        """
-        self.discreteOperator.apply(simulation)
+    @opsetup
+    def setup(self, rwork=None, iwork=None):
+        if not self._is_uptodate:
+            self.discrete_op =\
+                VelocityCorrection_D(self.discreteFields[self.velocity],
+                                     self.discreteFields[self.vorticity],
+                                     self.req_flowrate, self.cb, rwork=rwork,
+                                     iwork=iwork)
+            # Output setup
+            self._set_io('velocity_correction', (1, 2 + self.domain.dimension))
+            self.discrete_op.setWriter(self._writer)
+            self._is_uptodate = True
 
     def computeCorrection(self):
         """
         Compute the required correction for the current state
         but do not apply it onto velocity.
         """
-        self.discreteOperator.computeCorrection()
+        self.discrete_op.computeCorrection()
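
A sketch of the resulting call sequence for the refactored operator, with velo, vorti, rate and topo as defined in test_velocity_correction.py:

```python
from parmepy.operator.velocity_correction import VelocityCorrection
from parmepy.problem.simulation import Simulation

op = VelocityCorrection(velo, vorti, req_flowrate=rate,
                        discretization=topo)
op.discretize()   # single-topology only; multi-resolution is asserted out
op.setup()        # builds VelocityCorrection_D and the io writer
op.apply(Simulation(tinit=0.0, tend=5., timeStep=0.005, iterMax=1000000))
```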
diff --git a/HySoP/hysop/problem/#simulation.py# b/HySoP/hysop/problem/#simulation.py#
new file mode 100644
index 0000000000000000000000000000000000000000..821f9fba4cbe886953ab1d0741a02ab9998f275d
--- /dev/null
+++ b/HySoP/hysop/problem/#simulation.py#
@@ -0,0 +1,138 @@
+"""
+@file simulation.py
+
+Description of the simulation parameters (time, iteration ...)
+"""
+import sys
+eps = sys.float_info.epsilon
+
+
+class Simulation(object):
+    """
+    Setup for simulation parameters.
+    """
+
+    def __init__(self, tinit=0.0, tend=1.0, nbIter=None, timeStep=None,
+                 iterMax=1000):
+        """
+        Creates a Simulation object.
+        @param tinit : Simulation starting time.
+        @param tend : Simulation final time.
+        @param nbIter : number of required iterations
+        @param timeStep : default time step
+        @param iterMax : maximum number of iterations (useful only if
+        timeStep is modified; see the Adapt_timestep operator).
+
+        If both timeStep and nbIter are given, timeStep is not used.
+
+        Notation:
+        iteration number 'currentIteration'
+        between tk and tkp1 = tk + timeStep
+        """
+        ## Simulation final time
+        self.end = tend
+        ## Starting time
+        self.start = tinit
+        ## Simulation current time
+        self.time = tinit
+        ## True if the simulation is terminated
+        self.isOver = False
+        ## Iteration counter
+        self.currentIteration = -1
+        ## Number of iterations
+        if nbIter is not None:
+            self.nbIter = nbIter
+            if timeStep is not None:
+                print ('Warning : both nbIter and timeStep are given, '
+                       'timeStep is ignored')
+            self.timeStep = (self.end - self.start) / self.nbIter
+        elif timeStep is not None:
+            ## Simulation time step
+            self.timeStep = timeStep
+        else:
+            raise ValueError('You must set nbIter or timeStep value.')
+        self.iterMax = iterMax
+        if nbIter is not None:
+            assert iterMax >= nbIter
+        ## Starting time for the current iteration
+        self.tk = tinit
+        ## tk + dt
+        self.tkp1 = tinit + self.timeStep
+        self._lastStep = False
+        assert self.end > self.start, \
+            'Final time must be greater than initial time'
+        assert (self.start + self.timeStep) <= self.end,\
+            'start + step is bigger than end.'
+
+        ## Internal tolerance for timer
+        self.tol = eps
+
+    def advance(self):
+        """
+        Proceed to next time.
+        Advance time and iteration number.
+        Compute the time step for the incoming iteration (it may have been
+        updated by an adaptive time step operator and is adjusted so that
+        the simulation ends exactly at tend).
+        """
+        # Increment iteration counter
+        self.currentIteration += 1
+        if self._lastStep:
+            # The timestep was adjusted to reach end in the previous call
+            # So now the simulation is over
+            self.isOver = True
+        else:
+            if self.currentIteration < self.iterMax:
+                # Advance time for the iteration just ended
+                self.tk = self.tkp1
+                self.tkp1 = self.tk + self.timeStep
+
+                # Adjust last timestep to reach self.end
+                if self.tkp1 > self.end:
+                    self.timeStep = self.end - self.tk
+                    if self.timeStep <= self.tol:
+                        self.isOver = True
+                    else:
+                        self.tkp1 = self.end
+                        self._lastStep = True
+            else:
+                # iteration number is reached
+                self.isOver = True
+
+        self.time = self.tkp1
+
+    def updateTimeStep(self, newDt):
+        """
+        Update current time step.
+        This function is usually called from the Adapt_timestep operator.
+        """
+        self.timeStep = newDt
+
+    def printState(self):
+        """
+        print current state
+        """
+        msg = "== Iteration : {0:3d}, from t = {1:6.5} to t = {2:6.5f} =="
+        print (msg.format(self.currentIteration, self.tk, self.time))
+
+    def __str__(self):
+        s = "Simulation parameters : "
+        s += "from " + str(self.start) + ' to ' + str(self.end)
+        s += ', time step : ' + str(self.timeStep)
+        s += ', current time : ' + str(self.time) + ', iteration number : '
+        s += str(self.currentIteration) + ', max number of iterations : '
+        s += str(self.iterMax)
+        return s
+
+    def initialize(self):
+        self.tk = self.start
+        self.tkp1 = self.start + self.timeStep
+        self.time = self.tkp1
+        self.isOver = False
+        self.currentIteration = 0
+        self._lastStep = False
+
+    def finalize(self):
+        """
+        Useful for printers if printer.frequency is not 1
+        """
+        self.isOver = True
+        self.currentIteration = -1
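
A minimal time-loop sketch for this Simulation class (values are
illustrative; the behaviour follows the code above and the tests in
test_simulation.py):

    from parmepy.problem.simulation import Simulation

    simu = Simulation(tinit=0.0, tend=1.0, nbIter=10)   # timeStep = 0.1
    simu.initialize()
    while not simu.isOver:
        simu.printState()
        # an adaptive time step operator may call simu.updateTimeStep(new_dt)
        simu.advance()     # the last step is shrunk to land exactly on tend
    simu.finalize()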
diff --git a/HySoP/hysop/problem/__init__.pyc b/HySoP/hysop/problem/__init__.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..13e2009991cc570dda0ac8c74b593dce55b3b694
Binary files /dev/null and b/HySoP/hysop/problem/__init__.pyc differ
diff --git a/HySoP/hysop/problem/problem.py b/HySoP/hysop/problem/problem.py
index 1081f377ac1c7d2915582d961fd3afa41d8f7d02..10760a089d6237aa685e0c6547b9c777842081fc 100644
--- a/HySoP/hysop/problem/problem.py
+++ b/HySoP/hysop/problem/problem.py
@@ -3,23 +3,25 @@
 
 Complete problem description.
 """
-from parmepy.constants import debug, parmesPickle
+from parmepy.constants import debug
+import cPickle
 from parmepy import __VERBOSE__
-from parmepy.operator.monitors.monitoring import Monitoring
 from parmepy.operator.redistribute import Redistribute
-from parmepy.tools.timers import Timer, timed_function
+from parmepy.operator.redistribute_intra import RedistributeIntra
+from parmepy.tools.profiler import profile, Profiler
 from parmepy.mpi import main_rank
+from parmepy.gpu.gpu_transfer import DataTransfer
 
 
 class Problem(object):
     """
     Problem representation.
 
-    Contains several operators and monitors that apply on variables.
+    Contains several operators that apply on variables.
     Variables are defined on different domains.\n
     Each operator is set up and variables are initialized in a set up step.\n
     To solve the problem, a loop over time-steps is launched. A step consists
-    in calling the apply method of each operators and monitors.\n
+    in calling the apply method of each operators.\n
     To finish, a finalize method is called.\
     """
 
@@ -28,7 +30,7 @@ class Problem(object):
         return object.__new__(cls, *args, **kw)
 
     @debug
-    def __init__(self, operators, simulation, monitors=None,
+    def __init__(self, operators, simulation,
                  dumpFreq=100, name=None):
         """
         Create a transport problem instance.
@@ -36,7 +38,6 @@ class Problem(object):
         @param operators : list of operators.
         @param simulation : a parmepy.simulation.Simulation object
         to describe simulation parameters.
-        @param monitors : list of monitors.
         @param name : an id for the problem
         @param dumpFreq : frequency of dump (i.e. saving to a file)
         for the problem; set dumpFreq = -1 for no dumps. Default = 100.
@@ -45,16 +46,15 @@ class Problem(object):
         self.name = name
         ## Problem operators
         self.operators = operators
-        if monitors is None:
-            monitors = []
-        for m in monitors:
-            self.operators.append(m)
         ## Computes time step and manage iterations
         self.simulation = simulation
-        self.domain = self.operators[0].variables[0].domain
+        vref = self.operators[0].variables.keys()[0]
+        self.domain = vref.domain
         for op in self.operators:
-            for v in op.variables:
-                if not self.domain is v.domain:
+            for v in (v for v in op.variables if v is not vref):
+                if self.domain is not v.domain:
                     raise ValueError("Problem must have only one " +
                                      "domain for variables.")
         ## A list of variables that must be initialized before
@@ -75,74 +75,61 @@ class Problem(object):
         else:
             self.name = name
         ## Object to store computational times of lower level functions
-        self.timer = Timer(self)
+        self.profiler = Profiler(self, self.domain.comm_task)
         ## Default file name prefix for dump.
         self.filename = str(self.name)
         self._filedump = self.filename + '_rk_' + str(main_rank)
 
         # Flag : true when operators for computation are up
-        # and when variables are initialized (i.e. after a call to pre_setUp)
+        # and when variables are initialized (i.e. after a call to pre_setup)
         # Note : 3 categories of op : computation (stretching, poisson ...),
-        # monitors (printer ...) and data distribution (Redistribute)
+        # and data distribution (Redistribute)
         self._isReady = False
 
-    def addMonitors(self, monitors, position=None):
-        """
-        Set monitors for the problem
-        If position is given, insert monitor[i] at position[i]
-        else, monitior are added at the end of the list.
-        """
-        if position is None:
-            for m in monitors:
-                self.operators.append(m)
-        else:
-            for m, pos in zip(monitors, position):
-                self.operators.insert(pos, m)
-
     @debug
-    @timed_function
-    def setUp(self):
+    @profile
+    def setup(self):
         """
         Prepare operators (create topologies, allocate memories ...)
         """
         # Set up for 'computational' operators
         if not self._isReady:
-            self.pre_setUp()
-
-        for v in self.input:
-            v.initialize()
+            self.pre_setup()
+        print "Fin setup op"
+        # for v in self.input:
+        #     v.initialize()
 
         # other operators
         for op in self.operators:
-            if isinstance(op, Monitoring):
-                op.setUp()
-                op.discretize()
+            if isinstance(op, RedistributeIntra) or \
+               isinstance(op, DataTransfer):
+                op.setup()
 
         for op in self.operators:
             if isinstance(op, Redistribute):
-                op.setUp()
+                op.setup()
 
         if __VERBOSE__ and main_rank == 0:
             print ("====")
 
-    def pre_setUp(self):
+    def pre_setup(self):
         """
         - Partial setup : only for 'computational' operators
-        (i.e. excluding monitors, rendering, data distribution ...)
+        (i.e. excluding rendering, data distribution ...)
         - Initialize variables.
         """
         if self._isReady:
             return
 
         for op in self.operators:
-            if not isinstance(op, Monitoring):
-                if not isinstance(op, Redistribute):
-                    op.discretize()
+            if not isinstance(op, Redistribute) and \
+               not isinstance(op, DataTransfer):
+                op.discretize()
 
         for op in self.operators:
-            if not isinstance(op, Monitoring):
-                if not isinstance(op, Redistribute):
-                    op.setUp()
+            if not isinstance(op, Redistribute) and \
+               not isinstance(op, DataTransfer):
+                op.setup()
 
         if __VERBOSE__ and main_rank == 0:
             print ("==== Variables initialization ====")
@@ -153,6 +140,10 @@ class Problem(object):
         # Set the variables input topology as the topology of the first
         # operator that uses this variable as input.
         self.input = []
+        for op in self.operators:
+            for v in op.input:
+                if v not in self.input:
+                    self.input.append(v)
         for op in self.operators[::-1]:
             for v in op.output:
                 if v in self.input:
@@ -160,14 +151,11 @@ class Problem(object):
             for v in op.input:
                 if not v in self.input:
                     self.input.append(v)
-#                    if not isinstance(op, Monitoring):
-#                        if not isinstance(op, Redistribute):
-#                            v.setTopoInit(op.discreteFields[v].topology)
 
         self._isReady = True
 
     @debug
-    @timed_function
+    @profile
     def solve(self):
         """
         Solve problem.
@@ -202,15 +190,9 @@ class Problem(object):
         """
         if main_rank == 0:
             print ("\n\n==== End ====")
-        ## We terminate monitors before operators.
         for op in self.operators:
-            if isinstance(op, Monitoring):
-                op.finalize()
-                self.timer = self.timer + op.timer
-        for op in self.operators:
-            if not isinstance(op, Monitoring):
-                op.finalize()
-                self.timer = self.timer + op.timer
+            op.finalize()
+
         var = []
         for op in self.operators:
             for v in op.variables:
@@ -218,13 +200,17 @@ class Problem(object):
                     var.append(v)
         for v in var:
             v.finalize()
-            try:
-                self.timer = self.timer + v.timer
-            except AttributeError:
-                pass
+        self.profiler.summarize()
         if main_rank == 0:
             print ("===\n")
 
+    def get_profiling_info(self):
+        for op in self.operators:
+            self.profiler += op.profiler
+        for op in self.operators:
+            for v in op.variables:
+                self.profiler += v.profiler
+
     def __str__(self):
         """ToString method"""
         s = "Problem based on\n"
@@ -246,10 +232,10 @@ class Problem(object):
         if filename is not None:
             self.filename = filename
             self._filedump = filename + '_rk_' + str(main_rank)
-        db = parmesPickle(self._filedump, mode='store')
-        db.dump(self.simulation, 'simulation')
+        db = open(self._filedump, 'wb')
+        cPickle.dump(self.simulation, db)
         for v in self.input:
-            v.dump(self.filename, mode='append')
+            v.dump(self.filename)
 
     def restart(self, filename=None):
         """
@@ -263,18 +249,16 @@ class Problem(object):
         if filename is not None:
             self.filename = filename
             self._filedump = filename + '_rk_' + str(main_rank)
-        db = parmesPickle(self._filedump, mode='load')
-        self.simulation = db.load('simulation')[0]
+        db = open(self._filedump, 'rb')
+        self.simulation = cPickle.load(db)
         self.simulation.reset()
         for v in self.input:
             print ("load ...", self.filename)
             v.load(self.filename)
 
         for op in self.operators:
-            if isinstance(op, Monitoring):
-                op.setUp()
             if isinstance(op, Redistribute):
-                op.setUp()
+                op.setup()
 
     def setDumpFreq(self, freq):
         """
diff --git a/HySoP/hysop/problem/problem_tasks.py b/HySoP/hysop/problem/problem_tasks.py
index bf3acee319850e1b5da265fabae5352c1d75fb07..624dfc59859ee6743170d4170c47fc67a6a1095b 100644
--- a/HySoP/hysop/problem/problem_tasks.py
+++ b/HySoP/hysop/problem/problem_tasks.py
@@ -8,15 +8,16 @@ same tasks.
 from parmepy.constants import debug
 from parmepy import __VERBOSE__
 from parmepy.problem.problem import Problem
-from parmepy.operator.monitors.monitoring import Monitoring
+from parmepy.operator.redistribute_inter import RedistributeInter
+from parmepy.operator.redistribute_intra import RedistributeIntra
 from parmepy.operator.redistribute import Redistribute
-from parmepy.operator.redistribute_intercomm import RedistributeIntercomm
-from parmepy.tools.timers import timed_function
+from parmepy.gpu.gpu_transfer import DataTransfer
+from parmepy.tools.profiler import profile
 
 
 class ProblemTasks(Problem):
     """
-    As in Problem, it contains several operators and monitors that apply
+    As in Problem, it contains several operators that apply
     on variables. The operators are labeled by task_id that defines
     a identifier of a task.
     Tasks are subset of operators and are assigned to a subset of the MPI
@@ -27,7 +28,7 @@ class ProblemTasks(Problem):
         return object.__new__(cls, *args, **kw)
 
     @debug
-    def __init__(self, operators, simulation, tasks_list, monitors=None,
+    def __init__(self, operators, simulation, domain, tasks_list,
                  dumpFreq=100, name=None, main_comm=None):
         """
         Creates the problem.
@@ -35,7 +36,6 @@ class ProblemTasks(Problem):
         @param simulation : a parmepy.simulation.Simulation object
         to describe simulation parameters.
         @param tasks_list : list of task identifiers for each process rank
-        @param monitors : list of monitors.
         @param name : an id for the problem
         @param dumpFreq : frequency of dump (i.e. saving to a file)
         for the problem; set dumpFreq = -1 for no dumps. Default = 100.
@@ -45,8 +45,8 @@ class ProblemTasks(Problem):
         @remark : process number in communicator main_comm must equal the
         length of tasks_list.
         """
-        Problem.__init__(self, operators, simulation, monitors=monitors,
-                         dumpFreq=dumpFreq, name=name)
+        Problem.__init__(self, operators, simulation,
+                         domain=domain, dumpFreq=dumpFreq, name=name)
         self.tasks_list = tasks_list
         if main_comm is None:
             from parmepy.mpi.main_var import main_comm
@@ -56,42 +56,35 @@ class ProblemTasks(Problem):
             "The given task list length (" + str(self.tasks_list) + ") " \
             "does not match the communicator size" \
             " ({0})".format(self.main_comm.Get_size())
+        self.my_task = self.tasks_list[self._main_rank]
+        self.operators_on_task = []
 
-    def pre_setUp(self):
+    def pre_setup(self):
         """
-        - Removes operators and monitors that not have the same task identifier
+        - Removes operators that do not have the same task identifier
         as the current process
         - Keep the Redistribute_intercomm in both 'from' and 'to' task_id
         - Partial setup : only for 'computational' operators
-        (i.e. excluding monitors, rendering, data distribution ...)
+        (i.e. excluding rendering, data distribution ...)
         - Initialize variables.
         """
         if self._isReady:
             return
 
-        self.operators_backup = []
         ## Remove operators with a tasks not handled by this process.
-        for op in self.operators[::-1]:
-            self.operators_backup.append(op)
-            if not isinstance(op, RedistributeIntercomm):
-                if op.task_id != self.tasks_list[self._main_rank]:
-                    self.operators.remove(op)
-            else:
-                if op.id_from != self.tasks_list[self._main_rank] and \
-                        op.id_to != self.tasks_list[self._main_rank]:
-                    self.operators.remove(op)
+        for op in self.operators:
+            if op.task_id() == self.my_task:
+                self.operators_on_task.append(op)
 
         # Discretize and setup computational operators
-        for op in self.operators:
-            if not isinstance(op, Monitoring):
-                if not isinstance(op, Redistribute):
-                    if not isinstance(op, RedistributeIntercomm):
-                        op.discretize()
-        for op in self.operators:
-            if not isinstance(op, Monitoring):
-                if not isinstance(op, Redistribute):
-                    if not isinstance(op, RedistributeIntercomm):
-                        op.setUp()
+        for op in self.operators_on_task:
+            if not isinstance(op, Redistribute) and \
+               not isinstance(op, DataTransfer):
+                op.discretize()
+        for op in self.operators_on_task:
+            if not isinstance(op, Redistribute) and \
+               not isinstance(op, DataTransfer):
+                op.setup()
 
         # Build variables list to initialize
         # These are operators input variables that are not output of
@@ -99,71 +92,56 @@ class ProblemTasks(Problem):
         # Set the variables input topology as the topology of the first
         # operator that uses this variable as input.
         self.input = []
-        for op in self.operators[::-1]:
+        for op in self.operators_on_task:
+            for v in op.input:
+                if v not in self.input:
+                    self.input.append(v)
+        for op in self.operators_on_task[::-1]:
             for v in op.output:
                 if v in self.input:
-                    self.input.remove(v)
+                    if isinstance(op, RedistributeInter):
+                        if op._target_id == self.my_task:
+                            self.input.remove(v)
+                    else:
+                        self.input.remove(v)
             for v in op.input:
-                if not v in self.input:
-                    if isinstance(op, RedistributeIntercomm):
-                        if op.id_from == self.tasks_list[self._main_rank]:
+                if v not in self.input:
+                    if isinstance(op, RedistributeInter):
+                        if op._source_id == self.my_task:
                             self.input.append(v)
                     else:
                         self.input.append(v)
-#                    if not isinstance(op, Monitoring):
-#                        if not isinstance(op, Redistribute):
-#                            if not isinstance(op, RedistributeIntercomm):
-#                                v.setTopoInit(op.discreteFields[v].topology)
 
         self._isReady = True
 
-    def addMonitors(self, monitors, position=None):
-        """
-        Set monitors for the problem
-        If position is given, insert monitor[i] at position[i]
-        else, monitior are added at the end of the list.
-        """
-        if position is None:
-            for m in monitors:
-                if m.task_id == self.tasks_list[self._main_rank]:
-                    self.operators.append(m)
-        else:
-            for m, pos in zip(monitors, position):
-                if m.task_id == self.tasks_list[self._main_rank]:
-                    self.operators.insert(pos, m)
-
     @debug
-    @timed_function
-    def setUp(self):
+    @profile
+    def setup(self):
         """
         Prepare operators (create topologies, allocate memories ...)
         """
         # Set up for 'computational' operators
         if not self._isReady:
-            self.pre_setUp()
+            self.pre_setup()
 
-        for v in self.input:
-            v.initialize()
+        # for v in self.input:
+        #     v.initialize()
 
         # other operators
-        for op in self.operators:
-            if isinstance(op, Monitoring):
-                op.setUp()
-
-        for op in self.operators:
-            if isinstance(op, RedistributeIntercomm):
-                op.discretize()
-                op.setUp()
+        for op in self.operators_on_task:
+            if isinstance(op, RedistributeIntra) or \
+               isinstance(op, DataTransfer):
+                op.setup()
 
-        for op in self.operators:
-            if isinstance(op, Redistribute):
-                op.setUp()
+        for op in self.operators_on_task:
+            if isinstance(op, RedistributeInter):
+                op.setup()
 
         if __VERBOSE__ and self._main_rank == 0:
-            print ("====")
+            print("====")
 
     @debug
-    @timed_function
+    @profile
     def solve(self):
         """
         Solve problem.
@@ -181,7 +159,11 @@ class ProblemTasks(Problem):
             if self._main_rank == 0:
                 self.simulation.printState()
             for op in self.operators:
-                op.apply(self.simulation)
+                if op.task_id() == self.my_task:
+                    op.apply(self.simulation)
+                    if isinstance(op, RedistributeInter):
+                        if op._source_id == self.my_task:
+                            op.wait()
             testdump = \
                 self.simulation.currentIteration % self.dumpFreq == 0
             self.simulation.advance()
@@ -195,25 +177,15 @@ class ProblemTasks(Problem):
         """
         if self._main_rank == 0:
             print ("\n\n==== End ====")
-        ## We terminate monitors before operators.
-        for op in self.operators:
-            if isinstance(op, Monitoring):
-                op.finalize()
-                self.timer = self.timer + op.timer
-        for op in self.operators:
-            if not isinstance(op, Monitoring):
-                op.finalize()
-                self.timer = self.timer + op.timer
+        for op in self.operators_on_task:
+            op.finalize()
         var = []
-        for op in self.operators:
+        for op in self.operators_on_task:
             for v in op.variables:
-                if not v in var:
+                if v not in var:
                     var.append(v)
         for v in var:
             v.finalize()
-            try:
-                self.timer = self.timer + v.timer
-            except AttributeError:
-                pass
+        self.profiler.summarize()
         if self._main_rank == 0:
             print ("===\n")
diff --git a/HySoP/hysop/problem/problem_with_GLRendering.py b/HySoP/hysop/problem/problem_with_GLRendering.py
index a088a408a744581870861e5c5f791f9404c469cc..cf3f54bc66a376e4dbc30d2aab75930b8fb24852 100644
--- a/HySoP/hysop/problem/problem_with_GLRendering.py
+++ b/HySoP/hysop/problem/problem_with_GLRendering.py
@@ -4,7 +4,6 @@
 Extends Problem description to handle real-time rendering with OpenGL.
 """
 from parmepy.constants import debug
-from parmepy.tools.timers import timed_function
 from parmepy.mpi import main_rank
 from parmepy.problem.problem import Problem
 
@@ -16,7 +15,7 @@ class ProblemGLRender(Problem):
     """
 
     @debug
-    def __init__(self, operators, simulation, monitors=None,
+    def __init__(self, operators, simulation,
                  dumpFreq=100, name=None):
         """
         Create a transport problem instance.
@@ -24,24 +23,21 @@ class ProblemGLRender(Problem):
         @param operators : list of operators.
         @param simulation : a parmepy.simulation.Simulation object
         to describe simulation parameters.
-        @param monitors : list of monitors.
         @param name : an id for the problem
         @param dumpFreq : frequency of dump (i.e. saving to a file)
         for the problem; set dumpFreq = -1 for no dumps. Default = 100.
         """
         Problem.__init__(self, operators, simulation,
-                         monitors=monitors,
                          dumpFreq=dumpFreq,
                          name=name)
         self.gl_renderer = None
 
     @debug
-    @timed_function
-    def setUp(self):
+    def setup(self):
         """
         Prepare operators (create topologies, allocate memories ...)
         """
-        Problem.setUp(self)
+        Problem.setup(self)
         for ope in self.operators:
             try:
                 if ope.isGLRender:
@@ -51,7 +47,6 @@ class ProblemGLRender(Problem):
                 pass
 
     @debug
-    @timed_function
     def solve(self):
         """
         Solve problem.
diff --git a/HySoP/hysop/problem/simulation.pyc b/HySoP/hysop/problem/simulation.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a7bc78cfbc8434389538752b2d17494d003bb859
Binary files /dev/null and b/HySoP/hysop/problem/simulation.pyc differ
diff --git a/HySoP/hysop/problem/tests/test_simulation.py b/HySoP/hysop/problem/tests/test_simulation.py
index 38efa286c5c0689e406d979944ebbb7b1aafce9e..bdc7b3de6ca147067e8c4eab4c8ecfd074cbfb4e 100644
--- a/HySoP/hysop/problem/tests/test_simulation.py
+++ b/HySoP/hysop/problem/tests/test_simulation.py
@@ -3,50 +3,53 @@
 tests simulation incr and io_utils writer
 """
 from parmepy.problem.simulation import Simulation
+from parmepy.tools.parameters import IO_params
 from parmepy.tools.io_utils import Writer
 
 simu = Simulation(tinit=0.0, tend=1.0, nbIter=10)
 
 
 def test_simu_incr():
-    wr = Writer({'frequency': 2})
-    assert wr.doWrite(simu.currentIteration)
+    io_params = IO_params(filename='temp_test', frequency=2)
+    wr = Writer(io_params)
+    assert wr.do_write(simu.currentIteration)
 
     simu.initialize()
 
-    assert not wr.doWrite(simu.currentIteration)
+    assert not wr.do_write(simu.currentIteration)
 
     count = 1
     while not simu.isOver:
         if count % 2 == 0:
-            assert wr.doWrite(simu.currentIteration)
+            assert wr.do_write(simu.currentIteration)
         else:
-            assert not wr.doWrite(simu.currentIteration)
+            assert not wr.do_write(simu.currentIteration)
         simu.printState()
         simu.advance()
         count += 1
     assert simu.currentIteration == 10
     simu.finalize()
-    assert wr.doWrite(simu.currentIteration)
+    assert wr.do_write(simu.currentIteration)
 
 
 def test_simu_incr2():
-    wr = Writer({'frequency': 3})
-    assert wr.doWrite(simu.currentIteration)
+    io_params = IO_params(filename='temp_test', frequency=3)
+    wr = Writer(io_params)
+    assert wr.do_write(simu.currentIteration)
     simu.timeStep = 0.10000000001
     simu.initialize()
 
-    assert not wr.doWrite(simu.currentIteration)
+    assert not wr.do_write(simu.currentIteration)
 
     count = 1
     while not simu.isOver:
         if count % 3 == 0:
-            assert wr.doWrite(simu.currentIteration)
+            assert wr.do_write(simu.currentIteration)
         else:
-            assert not wr.doWrite(simu.currentIteration)
+            assert not wr.do_write(simu.currentIteration)
         simu.printState()
         simu.advance()
         count += 1
     assert simu.currentIteration == 10
     simu.finalize()
-    assert wr.doWrite(simu.currentIteration)
+    assert wr.do_write(simu.currentIteration)
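
For callers migrating from the dict-based Writer configuration, the change
amounts to the following (the filename is illustrative):

    from parmepy.tools.parameters import IO_params
    from parmepy.tools.io_utils import Writer

    # before: wr = Writer({'frequency': 2}); wr.doWrite(it)
    wr = Writer(IO_params(filename='temp_test', frequency=2))
    if wr.do_write(simu.currentIteration):  # simu: a Simulation, as above
        pass  # write output for this iteration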
diff --git a/HySoP/hysop/problem/tests/test_transport.py b/HySoP/hysop/problem/tests/test_transport.py
index 9a38ddd5cd94518ed5c1ca7cda257aec5f7ad7dd..3479dfb970093dfc506c6aa3acde1bde97ab439e 100644
--- a/HySoP/hysop/problem/tests/test_transport.py
+++ b/HySoP/hysop/problem/tests/test_transport.py
@@ -9,6 +9,7 @@ from parmepy.fields.continuous import Field
 from parmepy.operator.advection import Advection
 from parmepy.problem.transport import TransportProblem
 from parmepy.problem.simulation import Simulation
+from parmepy.tools.parameters import Discretization
 
 
 def cosinus_product_2D(x, y, t):
@@ -49,17 +50,17 @@ def rotating_velocity_2D(x, y, t):
 
 def assertion(dim, boxLength, boxMin, nbElem, finalTime, timeStep,
               s, v, rtol=1e-05, atol=1e-08):
-    box = Box(dim, length=boxLength, origin=boxMin)
+    box = Box(length=boxLength, origin=boxMin)
+    print "domain init ...", id(box)
     scal = Field(domain=box, formula=s, doVectorize=True, name='Scalar')
     velo = Field(domain=box, formula=v, doVectorize=True,
                  name='Velocity', isVector=True)
-    advec = Advection(velo, scal,
-                      resolutions={velo: nbElem,
-                                   scal: nbElem},
-                      )
+    advec = Advection(velo, scal, discretization=Discretization(nbElem))
     simu = Simulation(tinit=0.0, tend=finalTime, timeStep=timeStep, iterMax=1)
+    print "velo dom ...", id(velo.domain)
+    print "scal dom ...", id(scal.domain)
     pb = TransportProblem([advec], simu)
-    pb.setUp()
+    pb.setup()
     initial_scalar = npw.copy(scal.discreteFields.values()[0].data[0])
     pb.solve()
     return np.allclose(initial_scalar, scal.discreteFields.values()[0].data[0],
diff --git a/HySoP/hysop/test/__init__.py b/HySoP/hysop/test/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/HySoP/hysop/test/main_unit_tests.py b/HySoP/hysop/test/main_unit_tests.py
new file mode 100644
index 0000000000000000000000000000000000000000..7837dff17102b01da61363a2d86b3495ddc74125
--- /dev/null
+++ b/HySoP/hysop/test/main_unit_tests.py
@@ -0,0 +1,32 @@
+"""
+Launch tests for parmepy
+"""
+import unittest
+import doctest
+import sys
+import os
+# Insert the parmepy sources path into sys.path, starting from sys.path[0]
+# (i.e. the absolute path to __file__).
+# This file is designed to be launched after calling: setup.py build
+# It is then located in ./build/lib._platform_dependent_directory/parmepy/test
+# and tests run on the parmepy sources in ./build/lib._platform_dependent_directory
+if sys.path[0].find("build/lib.") >= 0:
+    sys.path.insert(1, os.path.split(os.path.split(sys.path[0])[0])[0])
+import parmepy
+
+# Automatically and recursively find unittest.TestCase implementations in package 'test'
+suite = unittest.TestLoader().discover(sys.path[0], pattern='test*.py')
+
+# Add doctests from python files
+suite.addTest(doctest.DocFileSuite('domain/box.py', package=parmepy))
+
+runner = unittest.TextTestRunner(verbosity=2).run(suite)
+
+if sys.path[0].find("build/lib.") >= 0:
+    if not runner.wasSuccessful():
+        fails = "\nFAILURES in " + __file__ + " : (" + str(len(runner.failures)) + ")\n"
+        for fail in runner.failures:
+            fails += fail[1]
+        log_failures = open('Testing/Temporary/PythonFailures.log', 'w')
+        log_failures.write(fails)
+        log_failures.close()
+        raise Exception("FAILED")
diff --git a/HySoP/hysop/test/test_obstacle/__init__.py b/HySoP/hysop/test/test_obstacle/__init__.py
new file mode 100755
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/HySoP/hysop/test/test_obstacle/test_obstacle.py b/HySoP/hysop/test/test_obstacle/test_obstacle.py
new file mode 100644
index 0000000000000000000000000000000000000000..7154dac267e4e6942565cd4776e607eec318dc3b
--- /dev/null
+++ b/HySoP/hysop/test/test_obstacle/test_obstacle.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+import unittest
+import time
+import parmepy as pp
+import numpy as np
+import numpy.testing as npt
+from parmepy.constants import *
+from math import *
+
+
+
+    # Parameters
+    nb = 65
+    timeStep = 0.02
+    finalTime = 1.
+#    outputFilePrefix = './parmepy/test/test_obstacle/Domain_'
+    outputFilePrefix = './res/Domain_'
+    outputModulo = 1
+
+    t0 = time.time()
+
+    ## Domain
+    box = pp.Box(3, length=[1., 1., 1.], origin=[0., 0., 0.])
+
+    ## Obstacle
+    sphere = pp.Obstacle(box, name='sphere', zlayer=0.1, radius=0.2,
+                         center=[0.5, 0.5, 0.5], orientation='West',
+                         porousLayer=0.1)
+
+    ## ChiDomain
+    chiDomain = pp.ContinuousField(domain=box, name='ChiDomain', vector=False)
+
+    ## Solver creation (discretisation of objects is done in solver initialisation)
+    topo3D = pp.CartesianTopology(domain=box, resolution=[nb, nb, nb], dim=3,
+                                  periods=[False, False, False])
+
+    ## Obstacle discretization
+    chiDomain.discretize(topo3D)
+    chiDomainD = chiDomain.discreteField[0]
+    sphere.discretize(topo3D)
+    sphereD = sphere.discreteObstacle[0]
+    sphereD.chiFunctions()
+    for x in sphereD.chiBoundary:
+        chiDomainD[x[0], x[1], x[2]] = 1.
+    for x in sphereD.chiSolid:
+        chiDomainD[x[0], x[1], x[2]] = 1.
+    for x in sphereD.chiPorous:
+        chiDomainD[x[0], x[1], x[2]] = 0.5
+#    for k in xrange (topo3D.mesh.resolution[2]):
+#        for j in xrange (topo3D.mesh.resolution[2]):
+#            for i in xrange (topo3D.mesh.resolution[2]):
+#                if ([i,j,k] in sphereD.chiBoundary) :
+#                    chiDomainD[i,j,k]=1.
+#                if ([i,j,k] in sphereD.chiSolid) :
+#                    chiDomainD[i,j,k]=1.
+#                if ([i,j,k] in sphereD.chiPorous) :
+#                    chiDomainD[i,j,k]=0.5
+    io = pp.Printer(fields=[chiDomain], frequency=outputModulo,
+                    outputPrefix=outputFilePrefix)
+    io.step()
+
+    t1 = time.time()
+
+    tf = time.time()
+
+    print "\n"
+    print "Total time : ", tf - t0, "sec (CPU)"
+    print "Init time : ", t1 - t0, "sec (CPU)"
+    print "Solving time : ", tf - t1, "sec (CPU)"
+
+
+if __name__ == "__main__":
+    run()
diff --git a/HySoP/hysop/test/test_operator/__init__.py b/HySoP/hysop/test/test_operator/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/HySoP/hysop/test/test_operator/test_CondStability.py b/HySoP/hysop/test/test_operator/test_CondStability.py
new file mode 100755
index 0000000000000000000000000000000000000000..e8b0c1271fdb30b1a86427d0fbd657a811411618
--- /dev/null
+++ b/HySoP/hysop/test/test_operator/test_CondStability.py
@@ -0,0 +1,145 @@
+# -*- coding: utf-8 -*-
+import time
+from parmepy.operator.differentialOperator_d import DifferentialOperator_d
+from parmepy.particular_solvers.integrator.euler import Euler
+#from parmepy.particular_solvers.integrator.runge_kutta2 import RK2
+#from parmepy.particular_solvers.integrator.runge_kutta3 import RK3
+#from parmepy.particular_solvers.integrator.runge_kutta4 import RK4
+import parmepy as pp
+from parmepy.constants import *
+import numpy as np
+from math import *
+import unittest
+#import sys
+import struct
+import array
+
+
+class test_CondStability(unittest.TestCase):
+    """
+    Condition Stability test class
+    """
+
+    def vitesse(self, x, y, z):
+        vx = 1.
+        vy = 1.
+        vz = 1.
+        return vx, vy, vz
+
+    def vorticite(self, x, y, z):
+        wx = 1.
+        wy = 1.
+        wz = 1.
+        return wx, wy, wz
+
+    def scalaire(self, x, y, z):
+        if x < 0.5 and y < 0.5 and z < 0.5:
+            return 1.
+        else:
+            return 0.
+
+
+    def testCondStab(self):
+        # Parameters
+        nb = 128
+        timeStep = 0.09
+        finalTime = 0.09
+        self.t = 0.
+        t0 = time.time()
+
+        ## Domain
+        box = pp.Box(3, length=[1., 1., 1.], origin=[0., 0., 0.])
+
+        ## Fields
+        velo = pp.AnalyticalField(domain=box, formula=self.vitesse, name='Velocity', vector=True)
+        vorti = pp.AnalyticalField(domain=box, formula=self.vorticite, name='Vorticity', vector=True)
+
+        inputJBField = [np.zeros((nb,nb,nb), dtype=PARMES_REAL, order=ORDER) for d in xrange(3)]
+
+        f1 = open('./parmepy/test/data/Fields_sav0_U.data','rb')
+        f2 = open('./parmepy/test/data/Fields_sav0_V.data','rb')
+        f3 = open('./parmepy/test/data/Fields_sav0_W.data','rb')
+
+        nbx = np.asarray(struct.unpack("i",f1.read(4)))
+        nby = np.asarray(struct.unpack("i",f1.read(4)))
+        nbz = np.asarray(struct.unpack("i",f1.read(4)))
+
+        binvalues = array.array('d')
+        binvalues.read(f1, nbx*nby*nbz)
+
+        data = np.array(binvalues, dtype=PARMES_REAL)
+        inputJBField[0] = np.reshape(data, (nbx,nby,nbz))
+        f1.close()
+
+        nbx = np.asarray(struct.unpack("i",f2.read(4)))
+        nby = np.asarray(struct.unpack("i",f2.read(4)))
+        nbz = np.asarray(struct.unpack("i",f2.read(4)))
+
+        binvalues = array.array('d')
+        binvalues.read(f2, nbx*nby*nbz)
+
+        data = np.array(binvalues, dtype=PARMES_REAL)
+        inputJBField[1] = np.reshape(data, (nbx,nby,nbz))
+        f2.close()
+
+        nbx = np.asarray(struct.unpack("i",f3.read(4)))
+        nby = np.asarray(struct.unpack("i",f3.read(4)))
+        nbz = np.asarray(struct.unpack("i",f3.read(4)))
+
+        binvalues = array.array('d')
+        binvalues.read(f3, nbx*nby*nbz)
+
+        data = np.array(binvalues, dtype=PARMES_REAL)
+        inputJBField[2] = np.reshape(data, (nbx,nby,nbz))
+        f3.close()
+
+        ## Operators
+        stretch = pp.Stretching(velo, vorti)
+
+        ## Solver creation (discretisation of objects is done in solver initialisation)
+        topo3D = pp.CartesianTopology(domain=box, resolution=[nbx[0]+1, nby[0]+1, nbz[0]+1], dim=3, ghosts=[2,2,2])
+
+        ##Problem
+        pb = pp.Problem(topo3D, [stretch])
+
+        ## Setting solver to Problem
+        pb.setSolver(finalTime, timeStep, solver_type='basic')
+        pb.solver.ODESolver = Euler  # alternatives: RK2, RK3, RK4
+        pb.initSolver()
+
+
+#        self.result = [np.ones((nbx, nby, nbz), dtype=PARMES_REAL, order=ORDER) for d in xrange(3)]
+#        vortidata= [np.zeros((128,128,128), dtype=PARMES_REAL, order=ORDER) for d in xrange(3)]
+        ## Input of JB velocity Fields
+#        print 'shape', stretch.velocity.discreteField[0].data[1][2:nb+2, 2:nb+2,2:nb+2].shape ,inputJBField[1].shape
+        stretch.velocity.discreteField[0].data[0][2:nb+2,2:nb+2,2:nb+2] = inputJBField[0]
+        stretch.velocity.discreteField[0].data[1][2:nb+2,2:nb+2,2:nb+2] = inputJBField[1]
+        stretch.velocity.discreteField[0].data[2][2:nb+2,2:nb+2,2:nb+2] = inputJBField[2]
+#        print 'shape', np.asarray(stretch.velocity.discreteField[0].data).shape
+
+        ## Calculation of vorticity Fields from velocity input data (JB)
+        self.curl = DifferentialOperator_d(stretch.velocity.discreteField[0].data, stretch.velocity.discreteField[0].data, choice='curl', topology=topo3D)
+        stretch.vorticity.discreteField[0].data = self.curl.discreteOperator.apply()
+        t1 = time.time()
+
+        ## Solve problem to deduce the LCFL
+        pb.solve()
+
+        tf = time.time()
+
+        print "\n"
+        print "Total time : ", tf - t0, "sec (CPU)"
+        print "Init time : ", t1 - t0, "sec (CPU)"
+        print "Solving time : ", tf - t1, "sec (CPU)"
+
+
+    def runTest(self):
+        self.testCondStab()
+
+def suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(test_CondStability))
+    return suite
+
+if __name__ == "__main__":
+    unittest.TextTestRunner(verbosity=2).run(suite())
diff --git a/HySoP/hysop/test/test_operator/test_Curl.py b/HySoP/hysop/test/test_operator/test_Curl.py
new file mode 100755
index 0000000000000000000000000000000000000000..7554bf7ef4793f871412ce41b68e9214e705c979
--- /dev/null
+++ b/HySoP/hysop/test/test_operator/test_Curl.py
@@ -0,0 +1,122 @@
+# -*- coding: utf-8 -*-
+import unittest
+
+#import parmepy
+
+from parmepy.operator.transport import *
+from parmepy.operator.continuous import *
+from parmepy.operator.differentialOperator import *
+from parmepy.operator.stretching import *
+from parmepy.fields.discrete import *
+from parmepy.fields.continuous import *
+from parmepy.fields.analytical import *
+from parmepy.domain.topology import *
+from parmepy.domain.box import *
+from parmepy.constants import *
+from parmepy.particular_solvers.basic import *
+from parmepy.particular_solvers.integrator.euler import *
+from parmepy.particular_solvers.solver import *
+import numpy as np
+import numpy.testing as npt
+import math
+
+
+class test_Curl(unittest.TestCase):
+    """
+    DiscreteVariable test class
+    """
+    def setup(self):
+        self.e = 0.0001  # Accepted error between result and analytical result
+        self.dim = 3
+        self.boxLength = [2.*np.pi, 2.*np.pi, 2.*np.pi]
+        self.boxMin = [ 0., 0., 0.]
+        self.nbPts = [33, 33, 33]
+        self.t = 0.
+        self.timeStep = 0.02
+        self.box = Box(dimension=self.dim,
+                       length=self.boxLength,
+                       origin=self.boxMin)
+
+    def testOperatorCurl(self):
+        # Continuous fields and operator declaration
+        self.velo = AnalyticalField(domain=self.box, formula=self.vitesse, name='Velocity', vector=True)
+        self.curl = DifferentialOperator(self.velo, self.velo, choice='curl')
+        # Topology definition
+        self.topo3D = CartesianTopology(domain=self.box, resolution=self.nbPts, dim=self.dim, ghosts=[2,2,2])
+        self.topo3DnoG = CartesianTopology(domain=self.box, resolution=self.nbPts, dim=self.dim, ghosts=[0,0,0])
+        self.result = [np.ones((self.topo3D.mesh.resolution), dtype=PARMES_REAL, order=ORDER) for d in xrange(self.dim)]
+        # Fields and operator discretization
+        self.velo.discretize(self.topo3D)
+        self.velo.initialize()
+        self.curl.discretize(self.velo.discreteField[self.velo._fieldId], self.velo.discreteField[self.velo._fieldId], topology=self.topo3D)
+        self.result = self.curl.discreteOperator.apply()
+        self.FinalTime = 0.02
+        self.anal = np.vectorize(self.vorticite)(self.topo3DnoG.mesh.coords[0], \
+                                                     self.topo3DnoG.mesh.coords[1], \
+                                                     self.topo3DnoG.mesh.coords[2])
+        # Comparison with analytical solution 
+#        print "max:",np.max(abs(self.anal[0]-self.result[0]))
+        ind0a = self.topo3D.ghosts[0]
+        ind0b = self.topo3D.mesh.resolution[0]-self.topo3D.ghosts[0]
+        ind1a = self.topo3D.ghosts[1]
+        ind1b = self.topo3D.mesh.resolution[1]-self.topo3D.ghosts[1]
+        ind2a = self.topo3D.ghosts[2]
+        ind2b = self.topo3D.mesh.resolution[2]-self.topo3D.ghosts[2]
+        npt.assert_array_less(abs(self.anal[0] - \
+                                            self.result[0][ind0a:ind0b,ind1a:ind1b,ind2a:ind2b]), self.e)
+#        print "max:",np.max(abs(self.anal[1]-self.result[1]))
+        npt.assert_array_less(abs(self.anal[1] - \
+                                            self.result[1][ind0a:ind0b,ind1a:ind1b,ind2a:ind2b]), self.e)
+#        print "max:",np.max(abs(self.anal[2]-self.result[2]))
+        npt.assert_array_less(abs(self.anal[2] - \
+                                            self.result[2][ind0a:ind0b,ind1a:ind1b,ind2a:ind2b]), self.e)
+
+    def vitesse(self, x, y, z):
+#        amodul = np.cos(np.pi*self.t/3)
+#        pix = np.pi*x
+#        piy = np.pi*y
+#        piz = np.pi*z
+#        pi2x = 2.*pix
+#        pi2y = 2.*piy
+#        pi2z = 2.*piz
+#        vx = 2.*np.sin(pix)*np.sin(pix)*np.sin(pi2y)*np.sin(pi2z)*amodul
+#        vy = -np.sin(pi2x)*np.sin(piy)*np.sin(piy)*np.sin(pi2z)*amodul
+#        vz = -np.sin(pi2x)*np.sin(piz)*np.sin(piz)*np.sin(pi2y)*amodul
+#        vx = np.cos(y)
+#        vy = np.cos(z)
+#        vz = np.cos(x)
+        vx = np.cos(y) * np.cos(z)
+        vy = np.cos(z) * np.cos(x)
+        vz = np.cos(x) * np.cos(y)
+        return vx, vy, vz
+
+    def vorticite(self, x, y, z):
+#        amodul = np.cos(np.pi*self.t/3)
+#        pix = np.pi*x
+#        piy = np.pi*y
+#        piz = np.pi*z
+#        pi2x = 2.*pix
+#        pi2y = 2.*piy
+#        pi2z = 2.*piz
+#        wx = 2.* np.pi * np.sin(pi2x) * amodul*( - np.cos(pi2y)*np.sin(piz)*np.sin(piz)+ np.sin(piy)*np.sin(piy)*np.cos(pi2z) )
+#        wy = 2.* np.pi * np.sin(pi2y) * amodul*( 2.*np.cos(pi2z)*np.sin(pix)*np.sin(pix)+ np.sin(piz)*np.sin(piz)*np.cos(pi2x) )
+#        wz = -2.* np.pi * np.sin(pi2z) * amodul*( np.cos(pi2x)*np.sin(piy)*np.sin(piy)+ np.sin(pix)*np.sin(pix)*np.cos(pi2y) )
+#        wx = np.sin(z)
+#        wy = np.sin(x)
+#        wz = np.sin(y)
+        wx = np.cos(x) * (np.sin(z) - np.sin(y))
+        wy = np.cos(y) * (np.sin(x) - np.sin(z))
+        wz = np.cos(z) * (np.sin(y) - np.sin(x))
+        return wx, wy, wz
+
+    def runTest(self):
+        self.setup()
+        self.testOperatorCurl()
+
+def suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(test_Curl))
+    return suite
+
+if __name__ == "__main__":
+    unittest.TextTestRunner(verbosity=2).run(suite())
diff --git a/HySoP/hysop/test/test_operator/test_DivProduct.py b/HySoP/hysop/test/test_operator/test_DivProduct.py
new file mode 100755
index 0000000000000000000000000000000000000000..dba99bd35e142750ad770eeff15bfa42801f9ff3
--- /dev/null
+++ b/HySoP/hysop/test/test_operator/test_DivProduct.py
@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+import unittest
+
+#import parmepy
+
+from parmepy.operator.transport import *
+from parmepy.operator.continuous import *
+from parmepy.operator.differentialOperator import *
+from parmepy.operator.stretching import *
+from parmepy.fields.discrete import *
+from parmepy.fields.continuous import *
+from parmepy.fields.analytical import *
+from parmepy.domain.topology import *
+from parmepy.domain.box import *
+from parmepy.constants import *
+from parmepy.particular_solvers.basic import *
+from parmepy.particular_solvers.integrator.euler import *
+from parmepy.particular_solvers.solver import *
+import numpy as np
+import numpy.testing as npt
+import math
+
+
+class test_DivProduct(unittest.TestCase):
+    """
+    DiscreteVariable test class
+    """
+    def setup(self):
+        self.e = 0.0002  # Accepted error between result and analytical result
+        self.dim = 3
+        self.boxLength = [2.*np.pi, 2.*np.pi, 2.*np.pi]
+        self.boxMin = [ 0., 0., 0.]
+        self.nbPts = [32, 32, 32]
+        self.timeStep = 0.02
+        self.ghosts = [2,2,2]
+        self.box = Box(dimension=self.dim,
+                       length=self.boxLength,
+                       origin=self.boxMin)
+
+    def testOperatorDiv(self):
+        # Continuous fields and operator declaration
+        self.velo = AnalyticalField(domain=self.box, formula=self.vitesse, name='Velocity', vector=True)
+        self.vorti = AnalyticalField(domain=self.box, formula=self.vorticite, name='Vorticity', vector=True)
+        # chercher cas test de tel sorte que le stretching serait periodique et analytique
+        self.div = DifferentialOperator(self.vorti, self.velo, choice='divWU')
+        # Topology definition / Fields and operator discretization
+#        self.result = [np.ones((self.nbPts), dtype=PARMES_REAL, order=ORDER) for d in xrange(self.dim)]
+        self.topo3D = CartesianTopology(domain=self.box, resolution=self.nbPts, dim=self.dim, ghosts=self.ghosts)
+        self.topo3DnoG = CartesianTopology(domain=self.box, resolution=self.nbPts, dim=self.dim, ghosts=[0,0,0])
+        self.vorti.discretize(self.topo3D)
+        self.velo.discretize(self.topo3D)
+        self.vorti.initialize()
+        self.velo.initialize()
+#        print 'size velo', self.velo.discreteField[self.velo._fieldId]
+        self.div.discretize(self.vorti.discreteField[self.vorti._fieldId], self.velo.discreteField[self.velo._fieldId], topology=self.topo3D)
+        self.result = self.div.discreteOperator.apply()
+        self.resol = self.topo3D.mesh.resolution
+        self.FinalTime = 0.2
+        self.anal=np.vectorize(self.analyticalDivProduct)(self.topo3DnoG.mesh.coords[0], \
+                                                     self.topo3DnoG.mesh.coords[1], \
+                                                     self.topo3DnoG.mesh.coords[2])
+        # Comparison with analytical solution 
+#        npt.assert_array_less(abs(self.anal[0][self.ghosts[0]:self.nbPts[0]+self.ghosts[0],\
+#                self.ghosts[1]:self.nbPts[1]+self.ghosts[1],\
+#                self.ghosts[2]:self.nbPts[2]+self.ghosts[2]]-self.result[0]), self.e)
+#        npt.assert_array_less(abs(self.anal[1][self.ghosts[0]:self.nbPts[0]+self.ghosts[0],\
+#                self.ghosts[1]:self.nbPts[1]+self.ghosts[1],\
+#                self.ghosts[2]:self.nbPts[2]+self.ghosts[2]]-self.result[1]), self.e)
+#        npt.assert_array_less(abs(self.anal[2][self.ghosts[0]:self.nbPts[0]+self.ghosts[0],\
+#                self.ghosts[1]:self.nbPts[1]+self.ghosts[1],\
+#                self.ghosts[2]:self.nbPts[2]+self.ghosts[2]]-self.result[2]), self.e)
+#        print "max:",np.max(abs(self.anal[2][self.ghosts[0]:self.nbPts[0]+self.ghosts[0],\
+#                    self.ghosts[1]:self.nbPts[1]+self.ghosts[1],\
+#                    self.ghosts[2]:self.nbPts[2]+self.ghosts[2]]-self.result[2]))
+
+        npt.assert_array_less(abs(self.anal[0]-self.result[0]), self.e)
+        npt.assert_array_less(abs(self.anal[1]-self.result[1]), self.e)
+        npt.assert_array_less(abs(self.anal[2]-self.result[2]), self.e)
+        print "max:",np.max(abs(self.anal[2]-self.result[2]))
+
+    def vitesse(self, x, y, z):
+        vx = np.sin(x)
+        vy = np.sin(y)
+        vz = np.sin(z)
+        return vx, vy, vz
+
+    def vorticite(self, x, y, z):
+        wx = 1.
+        wy = 1.
+        wz = 1.
+        return wx, wy, wz
+
+    def analyticalDivProduct(self, x, y, z):
+        sx = np.cos(x)
+        sy = np.cos(y)
+        sz = np.cos(z)
+        return sx, sy, sz
+
+    def runTest(self):
+        self.setup()
+        self.testOperatorDiv()
+
+def suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(test_DivProduct))
+    return suite
+
+if __name__ == "__main__":
+    unittest.TextTestRunner(verbosity=2).run(suite())
diff --git a/HySoP/hysop/test/test_operator/test_Forces.py b/HySoP/hysop/test/test_operator/test_Forces.py
new file mode 100755
index 0000000000000000000000000000000000000000..ae5f758e667570be47ad2f9380c3ea2e9c8f5c4b
--- /dev/null
+++ b/HySoP/hysop/test/test_operator/test_Forces.py
@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -*-
+import time
+from parmepy.physics.compute_forces import Compute_forces
+import parmepy as pp
+import numpy as np
+from math import *
+import unittest
+
+
+class test_Forces(unittest.TestCase):
+    """
+    DiscreteVariable test class
+    """
+
+    def vitesse(self, x, y, z):
+#        vx = 1. + x
+#        vy = - x * y
+#        vz = x * y * z + 10.
+        amodul = np.cos(np.pi*self.t/3)
+        pix = np.pi*x
+        piy = np.pi*y
+        piz = np.pi*z
+        pi2x = 2.*pix
+        pi2y = 2.*piy
+        pi2z = 2.*piz
+        vx = 2.*np.sin(pix)*np.sin(pix)*np.sin(pi2y)*np.sin(pi2z)*amodul
+        vy = -np.sin(pi2x)*np.sin(piy)*np.sin(piy)*np.sin(pi2z)*amodul
+        vz = -np.sin(pi2x)*np.sin(piz)*np.sin(piz)*np.sin(pi2y)*amodul
+        return vx, vy, vz
+
+    def vorticite(self, x, y, z):
+#        wx = x * y
+#        wy = y * z
+#        wz = - y
+        amodul = np.cos(np.pi*self.t/3)
+        pix = np.pi*x
+        piy = np.pi*y
+        piz = np.pi*z
+        pi2x = 2.*pix
+        pi2y = 2.*piy
+        pi2z = 2.*piz
+        wx = 2.* np.pi * np.sin(pi2x) * amodul*( - np.cos(pi2y)*np.sin(piz)*np.sin(piz)+ np.sin(piy)*np.sin(piy)*np.cos(pi2z) )
+        wy = 2.* np.pi * np.sin(pi2y) * amodul*( 2.*np.cos(pi2z)*np.sin(pix)*np.sin(pix)+ np.sin(piz)*np.sin(piz)*np.cos(pi2x) )
+        wz = -2.* np.pi * np.sin(pi2z) * amodul*( np.cos(pi2x)*np.sin(piy)*np.sin(piy)+ np.sin(pix)*np.sin(pix)*np.cos(pi2y) )
+        return wx, wy, wz
+
+    def scalaire(self, x, y, z):
+        if x < 0.5 and y < 0.5 and z < 0.5:
+            return 1.
+        else:
+            return 0.
+
+
+    def testComputeForces(self):
+        # Parameters
+        nb = 11
+        timeStep = 0.09
+        finalTime = 0.36
+        self.t = 0.
+        t0 = time.time()
+
+        ## Domain
+        box = pp.Box(3, length=[1., 1., 1.], origin=[0., 0., 0.])
+
+        ## Obstacle
+        sphere = pp.Obstacle(box, zlayer=0.1, radius=0.1,
+                             center=[0.5, 0.5, 0.5], name='sphere',
+                             orientation='West', porousLayer=0.05)
+
+        ## Fields
+        velo = pp.AnalyticalField(domain=box, formula=self.vitesse, name='Velocity', vector=True)
+        vorti = pp.AnalyticalField(domain=box, formula=self.vorticite, name='Vorticity', vector=True)
+
+        ## Solver creation (discretisation of objects is done in solver initialisation)
+        topo3D = pp.CartesianTopology(domain=box, resolution=[nb, nb, nb], dim=3, ghosts=[2,2,2])
+
+        ## Fields discretization
+        vorti.discretize(topo3D)
+        velo.discretize(topo3D)
+        vorti.initialize()
+        velo.initialize()
+
+        # Forces computation
+        Re = 200.
+        noca = Compute_forces(topo3D, sphere, boxMin=[0.2, 0.2, 0.2],
+                              boxMax=[0.8, 0.8, 0.8])
+        if (topo3D.rank == 0):
+            f = open('./parmepy/test/test_operator/NocaForces.dat', 'w')
+
+        while (self.t <= finalTime):
+            nocares = noca.apply(self.t, timeStep, velo.discreteField[velo._fieldId], vorti.discreteField[vorti._fieldId], Re)
+            if (topo3D.rank == 0):
+                # print time and forces values in the following order : time, cX, cY, cZ
+                f.write("%s   %s   %s   %s\n" % (self.t, nocares[0], nocares[1], nocares[2]))
+
+
+            self.t = self.t + timeStep
+
+        if (topo3D.rank == 0):
+            f.close()
+
+        tf = time.time()
+
+        print "\n"
+        print "Total time : ", tf - t0, "sec (CPU)"
+        print "Init time : ", t1 - t0, "sec (CPU)"
+        print "Solving time : ", tf - t1, "sec (CPU)"
+
+
+    def runTest(self):
+        self.testComputeForces()
+
+def suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(test_Forces))
+    return suite
+
+if __name__ == "__main__":
+    unittest.TextTestRunner(verbosity=2).run(suite())
diff --git a/HySoP/hysop/test/test_operator/test_Grad.py b/HySoP/hysop/test/test_operator/test_Grad.py
new file mode 100755
index 0000000000000000000000000000000000000000..12697261faabddc0685cfb9ea9b1c2d761d9cd89
--- /dev/null
+++ b/HySoP/hysop/test/test_operator/test_Grad.py
@@ -0,0 +1,146 @@
+# -*- coding: utf-8 -*-
+import unittest
+
+#import parmepy
+
+from parmepy.operator.transport import *
+from parmepy.operator.continuous import *
+from parmepy.operator.differentialOperator import *
+from parmepy.operator.stretching import *
+from parmepy.fields.discrete import *
+from parmepy.fields.continuous import *
+from parmepy.fields.analytical import *
+from parmepy.domain.topology import *
+from parmepy.domain.box import *
+from parmepy.constants import *
+from parmepy.particular_solvers.basic import *
+from parmepy.particular_solvers.integrator.euler import *
+from parmepy.particular_solvers.solver import *
+import numpy as np
+import numpy.testing as npt
+import math
+
+
+class test_Grad(unittest.TestCase):
+    """
+    Velocity gradient operator test class
+    """
+    def setUp(self):
+        self.e = 0.0001  # Accepted error between numerical and analytical results
+        self.dim = 3
+        self.boxLength = [2.*np.pi, 2.*np.pi, 2.*np.pi]
+        self.boxMin = [ 0., 0., 0.]
+        self.nbPts = [33, 33, 33]
+        self.t = 0.
+        self.timeStep = 0.02
+        self.box = Box(dimension=self.dim,
+                       length=self.boxLength,
+                       origin=self.boxMin)
+
+    def testOperatorGrad(self):
+        # Continuous fields and operator declaration
+        self.velo = AnalyticalField(domain=self.box, formula=self.vitesse, name='Velocity', vector=True)
+        self.grad = DifferentialOperator(self.velo, self.velo, choice='gradV')
+        # Topology definition
+        self.topo3D = CartesianTopology(domain=self.box, resolution=self.nbPts, dim=self.dim, ghosts=[2,2,2])
+        self.topo3DnoG = CartesianTopology(domain=self.box, resolution=self.nbPts, dim=self.dim, ghosts=[0,0,0])
+        self.result = [np.ones((self.topo3D.mesh.resolution), dtype=PARMES_REAL, order=ORDER) for d in xrange(self.dim * self.dim)]
+        # Fields and operator discretization
+        self.velo.discretize(self.topo3D)
+        self.velo.initialize()
+        self.grad.discretize(self.velo.discreteField[self.velo._fieldId], self.velo.discreteField[self.velo._fieldId], topology=self.topo3D)
+        self.result, maxgersh = self.grad.discreteOperator.apply()
+        self.FinalTime = 0.02
+        self.analX = np.vectorize(self.gradientUx)(self.topo3DnoG.mesh.coords[0], \
+                                                     self.topo3DnoG.mesh.coords[1], \
+                                                     self.topo3DnoG.mesh.coords[2])
+        self.analY = np.vectorize(self.gradientUy)(self.topo3DnoG.mesh.coords[0], \
+                                                     self.topo3DnoG.mesh.coords[1], \
+                                                     self.topo3DnoG.mesh.coords[2])
+        self.analZ = np.vectorize(self.gradientUz)(self.topo3DnoG.mesh.coords[0], \
+                                                     self.topo3DnoG.mesh.coords[1], \
+                                                     self.topo3DnoG.mesh.coords[2])
+        # Comparison with analytical solution 
+#        print "max:",np.max(abs(self.anal[0]-self.result[0]))
+        ind0a = self.topo3D.ghosts[0]
+        ind0b = self.topo3D.mesh.resolution[0]-self.topo3D.ghosts[0]
+        ind1a = self.topo3D.ghosts[1]
+        ind1b = self.topo3D.mesh.resolution[1]-self.topo3D.ghosts[1]
+        ind2a = self.topo3D.ghosts[2]
+        ind2b = self.topo3D.mesh.resolution[2]-self.topo3D.ghosts[2]
+
+        npt.assert_array_less(abs(self.analX[0] - \
+                                            self.result[0][ind0a:ind0b,ind1a:ind1b,ind2a:ind2b]), self.e)
+#        print "max:",np.max(abs(self.anal[1]-self.result[1]))
+        npt.assert_array_less(abs(self.analX[1] - \
+                                            self.result[1][ind0a:ind0b,ind1a:ind1b,ind2a:ind2b]), self.e)
+#        print "max:",np.max(abs(self.anal[2]-self.result[2]))
+        npt.assert_array_less(abs(self.analX[2] - \
+                                            self.result[2][ind0a:ind0b,ind1a:ind1b,ind2a:ind2b]), self.e)
+
+        npt.assert_array_less(abs(self.analY[0] - \
+                                            self.result[3][ind0a:ind0b,ind1a:ind1b,ind2a:ind2b]), self.e)
+#        print "max:",np.max(abs(self.anal[1]-self.result[1]))
+        npt.assert_array_less(abs(self.analY[1] - \
+                                            self.result[4][ind0a:ind0b,ind1a:ind1b,ind2a:ind2b]), self.e)
+#        print "max:",np.max(abs(self.anal[2]-self.result[2]))
+        npt.assert_array_less(abs(self.analY[2] - \
+                                            self.result[5][ind0a:ind0b,ind1a:ind1b,ind2a:ind2b]), self.e)
+
+        npt.assert_array_less(abs(self.analZ[0] - \
+                                            self.result[6][ind0a:ind0b,ind1a:ind1b,ind2a:ind2b]), self.e)
+#        print "max:",np.max(abs(self.anal[1]-self.result[1]))
+        npt.assert_array_less(abs(self.analZ[1] - \
+                                            self.result[7][ind0a:ind0b,ind1a:ind1b,ind2a:ind2b]), self.e)
+#        print "max:",np.max(abs(self.anal[2]-self.result[2]))
+        npt.assert_array_less(abs(self.analZ[2] - \
+                                            self.result[8][ind0a:ind0b,ind1a:ind1b,ind2a:ind2b]), self.e)
+
+    def vitesse(self, x, y, z):
+#        amodul = np.cos(np.pi*self.t/3)
+#        pix = np.pi*x
+#        piy = np.pi*y
+#        piz = np.pi*z
+#        pi2x = 2.*pix
+#        pi2y = 2.*piy
+#        pi2z = 2.*piz
+#        vx = 2.*np.sin(pix)*np.sin(pix)*np.sin(pi2y)*np.sin(pi2z)*amodul
+#        vy = -np.sin(pi2x)*np.sin(piy)*np.sin(piy)*np.sin(pi2z)*amodul
+#        vz = -np.sin(pi2x)*np.sin(piz)*np.sin(piz)*np.sin(pi2y)*amodul
+#        vx = np.cos(y)
+#        vy = np.cos(z)
+#        vz = np.cos(x)
+        vx = np.cos(y) * np.cos(z)
+        vy = np.cos(z) * np.cos(x)
+        vz = np.cos(x) * np.cos(y)
+        return vx, vy, vz
+
+    def gradientUx(self, x, y, z):
+        dUxdx = 0. 
+        dUxdy = - np.sin(y) * np.cos(z)
+        dUxdz = - np.sin(z) * np.cos(y)
+        return dUxdx, dUxdy, dUxdz
+
+    def gradientUy(self, x, y, z):
+        dUydx = - np.sin(x) * np.cos(z)
+        dUydy = 0.
+        dUydz = - np.sin(z) * np.cos(x)
+        return dUydx, dUydy, dUydz
+
+    def gradientUz(self, x, y, z):
+        dUzdx = - np.sin(x) * np.cos(y)
+        dUzdy = - np.sin(y) * np.cos(x)
+        dUzdz = 0.
+        return dUzdx, dUzdy, dUzdz
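+
+    # Optional sanity check (a sketch, not used by the test suite): compare
+    # one analytical derivative above against a central finite difference;
+    # the step 'h' is an arbitrary value introduced here for illustration.
+    def checkGradientUx(self, x, y, z, h=1.e-6):
+        dUxdy_num = (self.vitesse(x, y + h, z)[0]
+                     - self.vitesse(x, y - h, z)[0]) / (2. * h)
+        return abs(dUxdy_num - self.gradientUx(x, y, z)[1])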
+
+    def runTest(self):
+        self.testOperatorGrad()
+
+def suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(test_Grad))
+    return suite
+
+if __name__ == "__main__":
+    unittest.TextTestRunner(verbosity=2).run(suite())
diff --git a/HySoP/hysop/test/test_operator/test_GradUomega.py b/HySoP/hysop/test/test_operator/test_GradUomega.py
new file mode 100755
index 0000000000000000000000000000000000000000..67be93486cc7454d5eb53b83321ab8f680a3cee9
--- /dev/null
+++ b/HySoP/hysop/test/test_operator/test_GradUomega.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+import unittest
+
+#import parmepy
+
+from parmepy.operator.transport import *
+from parmepy.operator.continuous import *
+from parmepy.operator.differentialOperator import *
+from parmepy.operator.fct2op import *
+from parmepy.operator.stretching import *
+from parmepy.fields.discrete import *
+from parmepy.fields.continuous import *
+from parmepy.fields.analytical import *
+from parmepy.domain.topology import *
+from parmepy.domain.box import *
+from parmepy.constants import *
+from parmepy.particular_solvers.basic import *
+from parmepy.particular_solvers.integrator.euler import *
+from parmepy.particular_solvers.solver import *
+import numpy as np
+import numpy.testing as npt
+import math
+
+
+class test_GradUomega(unittest.TestCase):
+    """
+    Stretching term (grad(u).w) test class
+    """
+    def setUp(self):
+        self.e = 0.0001  # Accepted error between numerical and analytical results
+        self.dim = 3
+        self.boxLength = [2.*np.pi, 2.*np.pi, 2.*np.pi]
+        self.boxMin = [ 0., 0., 0.]
+        self.nbPts = [33, 33, 33]
+        self.t = 0.
+        self.timeStep = 0.02
+        self.box = Box(dimension=self.dim,
+                       length=self.boxLength,
+                       origin=self.boxMin)
+
+    def testOperatorGradUomega(self):
+        # Continuous fields and operator declaration
+        self.velo = AnalyticalField(domain=self.box, formula=self.vitesse, name='Velocity', vector=True)
+        self.curl = DifferentialOperator(self.velo, self.velo, choice='curl')
+        self.grad = DifferentialOperator(self.velo, self.velo, choice='gradV')
+        # Topology definition
+        self.topo3D = CartesianTopology(domain=self.box, resolution=self.nbPts, dim=self.dim, ghosts=[2,2,2])
+        self.topo3DnoG = CartesianTopology(domain=self.box, resolution=self.nbPts, dim=self.dim, ghosts=[0,0,0])
+        self.omega = [np.ones((self.topo3D.mesh.resolution), dtype=PARMES_REAL, order=ORDER) for d in xrange(self.dim)]
+        self.gradientU = [np.ones((self.topo3D.mesh.resolution), dtype=PARMES_REAL, order=ORDER) for d in xrange(self.dim * self.dim)]
+        self.result = [np.ones((self.topo3D.mesh.resolution), dtype=PARMES_REAL, order=ORDER) for d in xrange(self.dim)]
+        # Fields and operator discretization
+        self.velo.discretize(self.topo3D)
+        self.velo.initialize()
+        self.curl.discretize(self.velo.discreteField[self.velo._fieldId], self.velo.discreteField[self.velo._fieldId], topology=self.topo3D)
+        self.grad.discretize(self.velo.discreteField[self.velo._fieldId], self.velo.discreteField[self.velo._fieldId], topology=self.topo3D)
+        self.omega = self.curl.discreteOperator.apply()
+        self.gradientU, maxgersh = self.grad.discreteOperator.apply()
+        self.gradUomega = Fct2Op(self.omega, self.gradientU, choice = 'gradV', topology=self.topo3D)
+        self.result = self.gradUomega.apply(self.t, self.omega)
+
+        self.FinalTime = 0.02
+        self.anal = np.vectorize(self.stretch)(self.topo3DnoG.mesh.coords[0], \
+                                                     self.topo3DnoG.mesh.coords[1], \
+                                                     self.topo3DnoG.mesh.coords[2])
+
+
+        # Comparison with analytical solution 
+#        print "max:",np.max(abs(self.anal[0]-self.result[0]))
+        ind0a = self.topo3D.ghosts[0]
+        ind0b = self.topo3D.mesh.resolution[0]-self.topo3D.ghosts[0]
+        ind1a = self.topo3D.ghosts[1]
+        ind1b = self.topo3D.mesh.resolution[1]-self.topo3D.ghosts[1]
+        ind2a = self.topo3D.ghosts[2]
+        ind2b = self.topo3D.mesh.resolution[2]-self.topo3D.ghosts[2]
+
+        npt.assert_array_less(abs(self.anal[0] - \
+                                            self.result[0][ind0a:ind0b,ind1a:ind1b,ind2a:ind2b]), self.e)
+#        print "max:",np.max(abs(self.anal[1]-self.result[1]))
+        npt.assert_array_less(abs(self.anal[1] - \
+                                            self.result[1][ind0a:ind0b,ind1a:ind1b,ind2a:ind2b]), self.e)
+#        print "max:",np.max(abs(self.anal[2]-self.result[2]))
+        npt.assert_array_less(abs(self.anal[2] - \
+                                            self.result[2][ind0a:ind0b,ind1a:ind1b,ind2a:ind2b]), self.e)
+
+    def vitesse(self, x, y, z):
+#        amodul = np.cos(np.pi*self.t/3)
+#        pix = np.pi*x
+#        piy = np.pi*y
+#        piz = np.pi*z
+#        pi2x = 2.*pix
+#        pi2y = 2.*piy
+#        pi2z = 2.*piz
+#        vx = 2.*np.sin(pix)*np.sin(pix)*np.sin(pi2y)*np.sin(pi2z)*amodul
+#        vy = -np.sin(pi2x)*np.sin(piy)*np.sin(piy)*np.sin(pi2z)*amodul
+#        vz = -np.sin(pi2x)*np.sin(piz)*np.sin(piz)*np.sin(pi2y)*amodul
+#        vx = np.cos(y)
+#        vy = np.cos(z)
+#        vz = np.cos(x)
+        vx = np.cos(y) * np.cos(z)
+        vy = np.cos(z) * np.cos(x)
+        vz = np.cos(x) * np.cos(y)
+        return vx, vy, vz
+
+    def stretch(self, x, y, z):
+        sx = - np.cos(z) * np.cos(y) * ( np.sin(x) * np.sin(y) - np.sin(z) * np.sin(x))
+        sy = - np.cos(z) * np.cos(x) * ( - np.sin(x) * np.sin(y) + np.sin(z) * np.sin(y))
+        sz = - np.cos(y) * np.cos(x) * ( np.sin(x) * np.sin(z) - np.sin(y) * np.sin(z))
+        return sx, sy, sz
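+
+    # The formulas above can be re-derived symbolically, e.g. with sympy
+    # (a sketch, assuming sympy is available; not used by the test):
+    #   import sympy as sp
+    #   x, y, z = sp.symbols('x y z')
+    #   u = sp.Matrix([sp.cos(y)*sp.cos(z), sp.cos(z)*sp.cos(x), sp.cos(x)*sp.cos(y)])
+    #   w = sp.Matrix([u[2].diff(y) - u[1].diff(z),
+    #                  u[0].diff(z) - u[2].diff(x),
+    #                  u[1].diff(x) - u[0].diff(y)])        # w = curl(u)
+    #   stretch = sp.simplify(u.jacobian([x, y, z]) * w)    # (grad u).w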
+
+    def runTest(self):
+        self.testOperatorGradUomega()
+
+def suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(test_GradUomega))
+    return suite
+
+if __name__ == "__main__":
+    unittest.TextTestRunner(verbosity=2).run(suite())
diff --git a/HySoP/hysop/test/test_operator/test_Penalization.py b/HySoP/hysop/test/test_operator/test_Penalization.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f5529385a68804bfec3c260a08f6e5295333a77
--- /dev/null
+++ b/HySoP/hysop/test/test_operator/test_Penalization.py
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+import unittest
+import time
+import parmepy as pp
+import numpy as np
+from parmepy.particular_solvers.integrator.runge_kutta4 import RK4
+import numpy.testing as npt
+from parmepy.constants import *
+from math import *
+
+def vitesse(x, y, z):
+    vx = 2.
+    vy = 2.
+    vz = 2.
+    return vx, vy, vz
+
+def vorticite(x, y, z):
+    wx = 3.
+    wy = 3.
+    wz = 3.
+    return wx, wy, wz
+
+
+def run():
+    # Parameters
+    nb = 129
+    timeStep = 0.09
+    finalTime = 0.09
+    outputFilePrefix = './parmepy/test/test_operator/Penalization_'
+    outputModulo = 1
+
+    t0 = time.time()
+
+    ## Domain
+    box = pp.Box(3, length=[1., 1., 1.], origin=[0., 0., 0.])
+
+    ## Obstacle
+    lambd = np.array([0, 10, 10 ** 8], dtype=PARMES_REAL, order=ORDER)
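+    # Penalization coefficients, presumably one per region (outer fluid,
+    # porous layer, solid core): 0 leaves the flow untouched while 10**8
+    # enforces a quasi-solid behaviour.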
+    sphere = pp.Obstacle(box, zlayer=0.1, radius=0.2, center=[0.5, 0.5, 0.5],
+                         name='sphere', orientation='West', porousLayer=0.1)
+
+    ## ChiDomain
+    chiDomain = pp.ContinuousField(domain=box, name='ChiDomain', vector=False)
+
+    ## Fields
+    velo = pp.AnalyticalField(domain=box, formula=vitesse, name='Velocity', vector=True)
+    vorti = pp.AnalyticalField(domain=box, formula=vorticite, name='Vorticity', vector=True)
+
+    ## Operators
+    penal = pp.Penalization(velo, vorti, sphere, lambd)
+
+    ## Solver creation (discretisation of objects is done in solver initialisation)
+    topo3D = pp.CartesianTopology(domain=box, resolution=[nb, nb, nb], dim=3, ghosts=[2,2,2])
+
+    pb = pp.Problem(topo3D, [penal])
+
+    ## Setting solver to Problem
+    pb.setSolver(finalTime, timeStep, solver_type='basic', io=pp.Printer(fields=[velo, vorti], frequency=outputModulo, outputPrefix=outputFilePrefix))
+    pb.solver.ODESolver = RK4  # RK3 # RK2 # Euler
+    pb.initSolver()
+    t1 = time.time()
+
+    ## Solve problem
+    pb.solve()
+
+    tf = time.time()
+
+    print "\n"
+    print "Total time : ", tf - t0, "sec (CPU)"
+    print "Init time : ", t1 - t0, "sec (CPU)"
+    print "Solving time : ", tf - t1, "sec (CPU)"
+
+
+if __name__ == "__main__":
+    run()
diff --git a/HySoP/hysop/test/test_operator/test_Stretching.py b/HySoP/hysop/test/test_operator/test_Stretching.py
new file mode 100755
index 0000000000000000000000000000000000000000..1ee8a2e576b769bf2975ec8f063cceb84b5a0a6b
--- /dev/null
+++ b/HySoP/hysop/test/test_operator/test_Stretching.py
@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+import time
+from parmepy.particular_solvers.integrator.euler import Euler
+from parmepy.particular_solvers.integrator.runge_kutta2 import RK2
+from parmepy.particular_solvers.integrator.runge_kutta3 import RK3
+from parmepy.particular_solvers.integrator.runge_kutta4 import RK4
+import parmepy as pp
+import numpy as np
+from math import *
+import unittest
+
+
+
+class test_Stretching(unittest.TestCase):
+    """
+    Stretching operator test class
+    """
+
+    def vitesse(self,x, y, z):
+#        vx = 1. + x
+#        vy = - x * y
+#        vz = x * y * z + 10.
+        amodul = np.cos(np.pi*self.t/3)
+        pix = np.pi*x
+        piy = np.pi*y
+        piz = np.pi*z
+        pi2x = 2.*pix
+        pi2y = 2.*piy
+        pi2z = 2.*piz
+        vx = 2.*np.sin(pix)*np.sin(pix)*np.sin(pi2y)*np.sin(pi2z)*amodul
+        vy = -np.sin(pi2x)*np.sin(piy)*np.sin(piy)*np.sin(pi2z)*amodul
+        vz = -np.sin(pi2x)*np.sin(piz)*np.sin(piz)*np.sin(pi2y)*amodul
+        return vx, vy, vz
+
+    def vorticite(self,x, y, z):
+#        wx = x * y
+#        wy = y * z
+#        wz = - y
+        amodul = np.cos(np.pi*self.t/3)
+        pix = np.pi*x
+        piy = np.pi*y
+        piz = np.pi*z
+        pi2x = 2.*pix
+        pi2y = 2.*piy
+        pi2z = 2.*piz
+        wx = 2. * np.pi * np.sin(pi2x) * amodul * (-np.cos(pi2y) * np.sin(piz) * np.sin(piz) + np.sin(piy) * np.sin(piy) * np.cos(pi2z))
+        wy = 2. * np.pi * np.sin(pi2y) * amodul * (2. * np.cos(pi2z) * np.sin(pix) * np.sin(pix) + np.sin(piz) * np.sin(piz) * np.cos(pi2x))
+        wz = -2. * np.pi * np.sin(pi2z) * amodul * (np.cos(pi2x) * np.sin(piy) * np.sin(piy) + np.sin(pix) * np.sin(pix) * np.cos(pi2y))
+        return wx, wy, wz
+
+    def scalaire(self,x, y, z):
+        if x < 0.5 and y < 0.5 and z < 0.5:
+            return 1.
+        else:
+            return 0.
+
+
+    def testOperatorStretching(self):
+        # Parameters
+        nb = 16
+        timeStep = 0.09
+        finalTime = 0.09
+        self.t = 0.
+        t0 = time.time()
+
+        ## Domain
+        box = pp.Box(3, length=[1., 1., 1.], origin=[0., 0., 0.])
+
+        ## Fields
+        velo = pp.AnalyticalField(domain=box, formula=self.vitesse, name='Velocity', vector=True)
+        vorti = pp.AnalyticalField(domain=box, formula=self.vorticite, name='Vorticity', vector=True)
+
+        ## Operators
+        stretch = pp.Stretching(velo,vorti)
+
+        ## Solver creation (discretisation of objects is done in solver initialisation)
+        topo3D = pp.CartesianTopology(domain=box, resolution=[nb, nb, nb], dim=3, ghosts=[2, 2, 2])
+
+        ##Problem
+        pb = pp.Problem(topo3D, [stretch])
+
+        ## Setting solver to Problem
+        pb.setSolver(finalTime, timeStep, solver_type='basic')
+        pb.solver.ODESolver = RK3  # RK2 # Euler # RK4
+        pb.initSolver()
+
+        t1 = time.time()
+
+        ## Solve problem
+        pb.solve()
+
+        tf = time.time()
+
+        print "\n"
+        print "Total time : ", tf - t0, "sec (CPU)"
+        print "Init time : ", t1 - t0, "sec (CPU)"
+        print "Solving time : ", tf - t1, "sec (CPU)"
+
+
+    def runTest(self):
+        self.testOperatorStretching()
+
+def suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(test_Stretching))
+    return suite
+
+if __name__ == "__main__":
+    unittest.TextTestRunner(verbosity=2).run(suite())
diff --git a/HySoP/hysop/test/test_operator/test_transport_d.py b/HySoP/hysop/test/test_operator/test_transport_d.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf2d158e350606acf6478d82facb9fd185a34064
--- /dev/null
+++ b/HySoP/hysop/test/test_operator/test_transport_d.py
@@ -0,0 +1,122 @@
+"""
+Module for testing parmepy.operator.transport_d
+"""
+
+import unittest
+import parmepy as pp
+import numpy as np
+import pyopencl as cl
+
+
+class Transport_dTestCase(unittest.TestCase):
+
+    def setup_OpenCL_basic(self):
+        self.b = pp.Box()
+        self.nbElem = [32, 32, 32]
+        self.topo = pp.CartesianTopology(domain=self.b,
+                                         resolution=self.nbElem,
+                                         dim=len(self.nbElem))
+        build_options = "-cl-single-precision-constant -cl-opt-disable"
+        build_options += " -D WIDTH=32"
+        build_options += " -D WGN=32"
+        build_options += " -D PADDING=0"
+        build_options += " -D BASIC=1"
+        self.prg_basic = cl.Program(self.ctx, self.gpu_src).build(build_options)
+        self.p_positions = pp.fields.continuous.ContinuousField(self.b,
+                                                                name="test_field_positions")
+        self.p_scalar = pp.fields.continuous.ContinuousField(self.b,
+                                                             name="test_field_scalar")
+        self.true_p_positions = pp.fields.analytical.AnalyticalField(self.b,
+                                                                     formula=lambda x,y,z:x,
+                                                                     name="test_field_true_positions")
+        self.g_velocity = pp.fields.analytical.AnalyticalField(self.b,
+                                                              formula=lambda x, y, z: (0., 1., 0.),
+                                                              name="test_field_g_velocity", vector=True)
+        self.g_scalar = pp.fields.analytical.AnalyticalField(self.b,
+                                                              formula=lambda x, y, z: 1.,
+                                                              name="test_field_g_scalar")
+        self.b.discretize(self.topo.resolution)
+        self.pos, self.pos_id = self.p_positions.discretize(self.topo)
+        self.scal, self.scal_id = self.p_scalar.discretize(self.topo)
+        self.gvelo, self.gvelo_id = self.g_velocity.discretize(self.topo)
+        self.gscal, self.gscal_id = self.g_scalar.discretize(self.topo)
+        self.true_pos, self.true_pos_id = self.true_p_positions.discretize(self.topo)
+        self.true_p_positions.initialize()
+        self.g_velocity.initialize()
+        self.g_scalar.initialize()
+        self.pos.data = np.asarray(self.pos.data, dtype=pp.constants.PARMES_REAL_GPU, order='F')
+        self.scal.data = np.asarray(self.scal.data, dtype=pp.constants.PARMES_REAL_GPU, order='F')
+        self.gvelo.data[0] = np.asarray(self.gvelo.data[0], dtype=pp.constants.PARMES_REAL_GPU, order='F')
+        self.gvelo.data[1] = np.asarray(self.gvelo.data[1], dtype=pp.constants.PARMES_REAL_GPU, order='F')
+        self.gvelo.data[2] = np.asarray(self.gvelo.data[2], dtype=pp.constants.PARMES_REAL_GPU, order='F')
+        self.gscal.data = np.asarray(self.gscal.data, dtype=pp.constants.PARMES_REAL_GPU, order='F')
+        self.pos.gpu_data = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
+                                      size=self.pos.data.nbytes)
+        self.scal.gpu_data = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
+                                       size=self.scal.data.nbytes)
+        self.gvelo.gpu_data = [cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
+                                      size=gvelo.nbytes) for gvelo in self.gvelo.data]
+        self.gscal.gpu_data = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
+                                       size=self.gscal.data.nbytes)
+        cl.enqueue_copy(self.queue, self.gscal.gpu_data, self.gscal.data)
+        cl.enqueue_copy(self.queue, self.gvelo.gpu_data[0], self.gvelo.data[0])
+        cl.enqueue_copy(self.queue, self.gvelo.gpu_data[1], self.gvelo.data[1])
+        cl.enqueue_copy(self.queue, self.gvelo.gpu_data[2], self.gvelo.data[2])
+        self.transport = pp.Transport(self.g_velocity, self.g_scalar)
+        self.transport.discretize(result_position=self.p_positions, result_scalar=self.p_scalar)
+        self.transport.setMethod(pp.particular_solvers.gpu.KernelLauncher(self.prg_basic.advection,
+                                                                          self.queue,
+                                                                          (32, 32, 32), None))
+        self.transport.discreteOperator.init_copy = pp.particular_solvers.gpu.KernelLauncher(self.prg_basic.advec_init_copy,
+                                                                   self.queue,
+                                                                   (32, 32, 32), None)
+        self.transport.discreteOperator.init_transpose = pp.particular_solvers.gpu.KernelListLauncher([self.prg_basic.advec_init_transpose_3D_01,
+                                                                             self.prg_basic.advec_init_transpose_3D_02],
+                                                                                self.queue,
+                                                                                [(2 * int(self.nbElem[0]),
+                                                                                  int(self.nbElem[1]) / 32,
+                                                                                  int(self.nbElem[2])),
+                                                                                 (2 * int(self.nbElem[0]),
+                                                                                  int(self.nbElem[1]),
+                                                                                  int(self.nbElem[2]) / 32)
+                                                                                 ],
+                                                                                [None,
+                                                                                 None])
+
+    def setUp(self):
+        try:
+            self.platform = cl.get_platforms()[0]
+            self.device = self.platform.get_devices(cl.device_type.GPU)[0]
+            self.ctx = cl.Context([self.device])
+            self.queue = cl.CommandQueue(self.ctx, properties=cl.command_queue_properties.PROFILING_ENABLE)
+            f = open(pp.constants.GPU_SRC, 'r')
+            self.gpu_src = "".join(f.readlines())
+            f.close()
+            self.test = True
+            print "Testing on " + self.device.name
+        except Exception:
+            self.test = False
+            print "No tests because no GPU"
+
+    def tearDown(self):
+        pass
+
+    def test_apply_basic(self):
+        """Test advection basic"""
+        if self.test:
+            self.setup_OpenCL_basic()
+            self.transport.discreteOperator.apply(0., 0.1, 0)
+            print self.scal.domain.step
+            self.queue.finish()
+            cl.enqueue_copy(self.queue, self.pos.data, self.pos.gpu_data)
+            cl.enqueue_copy(self.queue, self.scal.data, self.scal.gpu_data)
+            self.queue.finish()
+            print np.max(self.pos.data),np.min(self.pos.data)
+            print self.pos.data[1,1,1], self.true_pos.data[1,1,1]
+            np.testing.assert_array_almost_equal(self.pos.data, self.true_pos.data)
+            np.testing.assert_array_almost_equal(self.scal.data, self.gscal.data)
+
+if __name__ == "__main__":
+    unittest.TextTestRunner(verbosity=2).run(
+        unittest.TestLoader().loadTestsFromTestCase(Transport_dTestCase)
+        )
diff --git a/HySoP/hysop/test/test_particular_solvers/__init__.py b/HySoP/hysop/test/test_particular_solvers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/HySoP/hysop/test/test_particular_solvers/test_EDO_erreur.py b/HySoP/hysop/test/test_particular_solvers/test_EDO_erreur.py
new file mode 100644
index 0000000000000000000000000000000000000000..c07c30400d9c533aea1ff1e0f16069258d0b7c8e
--- /dev/null
+++ b/HySoP/hysop/test/test_particular_solvers/test_EDO_erreur.py
@@ -0,0 +1,171 @@
+# -*- coding: utf-8 -*-
+import time
+import parmepy as pp
+from parmepy.particular_solvers.integrator.euler import Euler
+from parmepy.particular_solvers.integrator.runge_kutta2 import RK2
+from parmepy.particular_solvers.integrator.runge_kutta3 import RK3
+from parmepy.particular_solvers.integrator.runge_kutta4 import RK4
+from math import *
+import unittest
+import numpy as np
+import numpy.testing as npt
+import copy
+import matplotlib.pyplot as plt
+from parmepy.constants import *
+
+
+class test_EDO(unittest.TestCase):
+    """
+    Time integration schemes convergence test class
+    """
+
+    def analyticalSolution(self,t, x, y, z):
+        sx = (t*np.exp(t) + 1.) * np.exp(-t)
+        sy = (t*np.exp(t) + 1.) * np.exp(-t)
+        sz = (t*np.exp(t) + 1.) * np.exp(-t)
+        return sx, sy, sz
+
+    def f(self, t, u):
+        fx = -u[0][:,:,:] + t + 1.
+        fy = -u[1][:,:,:] + t + 1.
+        fz = -u[2][:,:,:] + t + 1.
+        return fx, fy, fz
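+
+    # Note: with u(0) = 1, the exact solution of u' = -u + t + 1 is
+    # u(t) = t + exp(-t), which is what analyticalSolution above returns,
+    # since (t*exp(t) + 1.)*exp(-t) = t + exp(-t).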
+
+    def testIntegratorEDO(self):
+        # Parameters
+        nb = 32
+        timeStep = 0.1
+        finalTime = 1.0
+        multi = 2.
+        maxerror = 0.
+
+        t0 = time.time()
+        self.t = 0.
+        ## Domain
+        box = pp.Box(3, length=[4.*np.pi, 4.*np.pi, 4.*np.pi], 
+                     origin=[- 2.*np.pi,- 2.*np.pi,- 2.*np.pi])
+
+
+#####################################################################################
+
+        ## Solver creation (discretisation of objects is done in solver initialisation)
+        topo3D = pp.CartesianTopology(domain=box, resolution=[nb, nb, nb], dim=3, periods=[False, False, False])
+
+        Result=np.asarray([np.zeros([nb, nb, nb], 
+                          dtype=PARMES_REAL, order=ORDER) for d in xrange(3)])
+        Analvorty=np.asarray([np.zeros([nb, nb, nb], 
+                             dtype=PARMES_REAL, order=ORDER) for d in xrange(3)])
+
+        taille = int(finalTime / timeStep) + 1
+        compteur = 0
+        dt = np.zeros([taille], dtype=PARMES_REAL, order=ORDER)
+        errEuler = np.zeros([taille], dtype=PARMES_REAL, order=ORDER)
+        errRK2 = np.zeros([taille], dtype=PARMES_REAL, order=ORDER)
+        errRK3 = np.zeros([taille], dtype=PARMES_REAL, order=ORDER)
+        errRK4 = np.zeros([taille], dtype=PARMES_REAL, order=ORDER)
+        UEuler = np.asarray([np.zeros([nb, nb, nb], dtype=PARMES_REAL, 
+                            order=ORDER) for d in xrange(3)])
+        URK2 = np.asarray([np.zeros([nb, nb, nb], dtype=PARMES_REAL, 
+                          order=ORDER) for d in xrange(3)])
+        URK3 = np.asarray([np.zeros([nb, nb, nb], dtype=PARMES_REAL, 
+                          order=ORDER) for d in xrange(3)])
+        URK4 = np.asarray([np.zeros([nb, nb, nb], dtype=PARMES_REAL, 
+                          order=ORDER) for d in xrange(3)])
+
+        UEuler[:,:,:,:] = 1.
+        URK2[:,:,:,:] = 1.
+        URK3[:,:,:,:] = 1.
+        URK4[:,:,:,:] = 1.
+
+        Analvorty[:,:,:,:]=np.vectorize(self.analyticalSolution)(timeStep,
+                                          topo3D.mesh.coords[0], \
+                                          topo3D.mesh.coords[1], \
+                                          topo3D.mesh.coords[2])[:]
+
+        while self.t < finalTime:
+            print "t", self.t
+
+            # Euler
+            self.method = Euler
+            methodInt = self.method(self.f)
+            Result = methodInt.integrate(self.f, self.t, timeStep, UEuler)
+            UEuler = Result
+            errEuler[compteur] = np.max(abs(Analvorty[:,:,:,:] - Result[:,:,:,:]))
+
+            # RK2
+            self.method = RK2
+            methodInt = self.method(self.f)
+            Result = methodInt.integrate(self.f, self.t, timeStep, URK2)
+            URK2 = Result
+            errRK2[compteur] = np.max(abs(Analvorty[:,:,:,:] - Result[:,:,:,:]))
+
+            # RK3
+            self.method = RK3
+            methodInt = self.method(self.f)
+            Result = methodInt.integrate(self.f, self.t, timeStep, URK3)
+            URK3 = Result
+            errRK3[compteur] = np.max(abs(Analvorty[:,:,:,:] - Result[:,:,:,:]))
+
+            # RK4
+            self.method = RK4
+            methodInt = self.method(self.f)
+            Result = methodInt.integrate(self.f, self.t, timeStep, URK4)
+            URK4 = Result
+            errRK4[compteur] = np.max(abs(Analvorty[:,:,:,:] - Result[:,:,:,:]))
+            dt[compteur] = self.t
+
+            self.t = self.t + timeStep
+            Analvorty[:,:,:,:]=np.vectorize(self.analyticalSolution)(self.t + timeStep, 
+                                          topo3D.mesh.coords[0], \
+                                          topo3D.mesh.coords[1], \
+                                          topo3D.mesh.coords[2])[:]
+            compteur = compteur + 1
+
+        # Check the convergence order of each time integration scheme :
+        npt.assert_array_less(errEuler, timeStep)
+        npt.assert_array_less(errRK2, timeStep**2)
+        npt.assert_array_less(errRK3, timeStep**3)
+        npt.assert_array_less(errRK4, timeStep**4)
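+        # With timeStep = 0.1 these bounds demand roughly one extra order of
+        # accuracy per scheme order (1e-1, 1e-2, 1e-3, 1e-4): a cheap
+        # stand-in for a full convergence study over a range of time steps.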
+
+        ## Table of error values
+#        print "erreur Euler", errEuler
+#        print "erreur RK2", errRK2
+#        print "erreur RK3", errRK3
+#        print "erreur RK4", errRK4
+
+        ## Plot error_scheme vs time
+#        plt.figure(1)
+#        plt.subplot(211)
+#        plt.xlabel('dt')
+#        plt.xscale('log')
+#        plt.ylabel('Erreur')
+#        plt.yscale('log')
+#        plt.plot(dt, errEuler, '+-',dt ,errRK2, '+-' ,dt, errRK3, '+-',dt,errRK4, '+-')
+#        plt.legend([u"Euler", u"RK2", u"RK3", u"RK4"], loc=4)
+#        plt.subplot(212)
+#        plt.xlabel('dt')
+#        plt.ylabel('Erreur')
+#        plt.plot(dt, errEuler, '+-',dt ,errRK2, '+-' ,dt, errRK3, '+-',dt,errRK4, '+-')
+#        plt.legend([u"Euler", u"RK2", u"RK3", u"RK4"], loc=4)
+
+#        plt.show()
+
+        t1 = time.time()
+
+        print "\n"
+#        print "Total time : ", tf - t0, "sec (CPU)"
+#        print "Init time : ", t1 - t0, "sec (CPU)"
+#        print "Solving time : ", tf - t1, "sec (CPU)"
+
+
+    def runTest(self):
+        self.testIntegratorEDO()
+
+def suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(test_EDO))
+    return suite
+
+if __name__ == "__main__":
+    unittest.TextTestRunner(verbosity=2).run(suite())
diff --git a/HySoP/hysop/test/test_particular_solvers/test_RK.py b/HySoP/hysop/test/test_particular_solvers/test_RK.py
new file mode 100644
index 0000000000000000000000000000000000000000..70ed6aa89bd3738f3288274aa1189815eb6c011d
--- /dev/null
+++ b/HySoP/hysop/test/test_particular_solvers/test_RK.py
@@ -0,0 +1,154 @@
+# -*- coding: utf-8 -*-
+import time
+import parmepy as pp
+from parmepy.particular_solvers.integrator.euler import Euler
+from parmepy.particular_solvers.integrator.runge_kutta2 import RK2
+from parmepy.particular_solvers.integrator.runge_kutta3 import RK3
+from parmepy.particular_solvers.integrator.runge_kutta4 import RK4
+from parmepy.particular_solvers.integrator.runge_kutta2stretching import RK2Stretch
+from parmepy.particular_solvers.integrator.runge_kutta3stretching import RK3Stretch
+from parmepy.particular_solvers.integrator.runge_kutta4stretching import RK4Stretch
+from math import *
+import unittest
+import numpy as np
+import numpy.testing as npt
+import copy
+import matplotlib.pyplot as plt
+from parmepy.constants import *
+
+
+
+class test_RK(unittest.TestCase):
+    """
+    Runge-Kutta integrators test class
+    """
+
+    def vitesse(self, x, y, z):
+        vx = np.sin(x)
+        vy = np.sin(y)
+        vz = np.sin(z)
+        return vx, vy, vz
+
+    def vorticite(self, x, y, z):
+        wx = self.t**2+1.
+        wy = self.t**2+1.
+        wz = self.t**2+1.
+        return wx, wy, wz
+
+    def analyticalSolution(self,t, x, y, z):
+        sx = t**2 - (t + t**3 /3.) * np.cos(x)
+        sy = t**2 - (t + t**3 /3.) * np.cos(y)
+        sz = t**2 - (t + t**3 /3.) * np.cos(z)
+        return sx, sy, sz
+
+    def f(self, t, u):
+        fx = 2.*t - u[0][:,:,:]
+        fy = 2.*t - u[1][:,:,:]
+        fz = 2.*t - u[2][:,:,:]
+        return fx, fy, fz
+
+
+    def wgradu(self, t, x, y, z):
+        wx = (1 + t**2) * np.cos(x)
+        wy = (1 + t**2) * np.cos(y)
+        wz = (1 + t**2) * np.cos(z)
+        return wx, wy, wz
+
+
+    def testIntegratorRK(self):
+        # Parameters
+        nb = 32
+        timeStep = 0.01
+        finalTime = 0.01
+        maxerror=0.
+
+        t0 = time.time()
+        self.t = 0.
+        ## Domain
+        box = pp.Box(3, length=[4.*np.pi, 4.*np.pi, 4.*np.pi], origin=[- 2.*np.pi,- 2.*np.pi,- 2.*np.pi])
+
+
+#####################################################################################
+
+        ## Fields
+#        velo = pp.AnalyticalField(domain=box, formula=self.vitesse, name='Velocity', vector=True)
+#        vorti = pp.AnalyticalField(domain=box, formula=self.vorticite, name='Vorticity', vector=True)
+
+        ## Operators
+#        stretch = pp.Stretching(velo,vorti)
+
+        ## Solver creation (discretisation of objects is done in solver initialisation)
+        topo3D = pp.CartesianTopology(domain=box, resolution=[nb, nb, nb], dim=3)
+
+        ##Problem
+#        pb = pp.Problem(topo3D, [stretch])
+
+        ## Setting solver to Problem
+#        pb.setSolver(finalTime, timeStep, solver_type='basic')
+
+        Result=np.asarray([np.zeros([nb, nb, nb], dtype=PARMES_REAL, order=ORDER) for d in xrange(3)])
+        Analvorty=np.asarray([np.zeros([nb, nb, nb], dtype=PARMES_REAL, order=ORDER) for d in xrange(3)])
+        Analvorty[:,:,:,:]= np.vectorize(self.wgradu)(self.t,topo3D.mesh.coords[0], \
+                                          topo3D.mesh.coords[1], \
+                                          topo3D.mesh.coords[2])[:]
+
+        self.method = RK3  # RK3Stretch # RK2Stretch # RK4Stretch # Euler # RK4
+
+        print "\nODE SOLVER :: ", self.method
+
+        methodInt = self.method(self.f)
+        Result = methodInt.integrate(self.f, self.t, timeStep, self.wgradu, topo3D)
+
+        if self.method == Euler:
+            maxerror = abs((timeStep**2 - 1./3. * timeStep**3) - 0.) + timeStep**3
+        if self.method == RK2:
+            maxerror = abs((-1./3. * timeStep**3) - (0.5 * timeStep**2)) + timeStep**8
+        if self.method == RK3:
+            maxerror = abs(0. - (0.5 * timeStep**2 - 1./6. * timeStep**3)) + timeStep**3
+        if self.method == RK4:
+            maxerror = abs((timeStep**2 - 1./3. * timeStep**3) - (7./6. * timeStep**2 - 1./2. * timeStep**3 + 1./8. * timeStep**4))
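+        # These thresholds appear to be analytic one-step error estimates for
+        # u' = 2t - u (exact solution minus the scheme's truncated Taylor
+        # expansion), padded with a higher-order term as a safety margin.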
+
+
+#        pb.initSolver()
+        t1 = time.time()
+
+        ## Solve problem
+#        pb.solve()
+        self.t = self.t + timeStep
+        Analvorty[:,:,:,:]=Analvorty[:,:,:,:]+np.vectorize(self.analyticalSolution)(self.t, topo3D.mesh.coords[0], \
+                                            topo3D.mesh.coords[1], \
+                                            topo3D.mesh.coords[2])[:]
+
+        # Visual comparison between the numerical and the analytical solution
+        for i in range(1):
+            plt.figure(1)
+            plt.subplot(211)
+            plt.axis([-2*np.pi,2*np.pi,np.min(Analvorty[i][:,0,0]),np.max(Analvorty[i][:,0,0])])
+            plt.plot(topo3D.mesh.coords[0][:,0,0], Analvorty[i][:,0,0], '-' ,topo3D.mesh.coords[0][:,0,0],Result[i][:,0,0], '-' )
+            plt.legend([u"Solution Analytique", u"Solution Numérique"])
+            plt.subplot(212)
+            plt.axis([-2*np.pi,2*np.pi,0,max(abs(Analvorty[i][:,0,0] - Result[i][:,0,0]))])
+            plt.plot(topo3D.mesh.coords[0][:,0,0], abs(Analvorty[i][:,0,0] - Result[i][:,0,0]), '-' )
+            plt.legend([u"Erreur"])
+            ax = plt.gca()
+            ax.ticklabel_format(style='sci', axis='y')
+            plt.show()
+        npt.assert_array_less(abs(Analvorty[:,:,:,:] - Result[:,:,:,:]),maxerror)
+        tf = time.time()
+
+        print "\n"
+        print "Total time : ", tf - t0, "sec (CPU)"
+        print "Init time : ", t1 - t0, "sec (CPU)"
+        print "Solving time : ", tf - t1, "sec (CPU)"
+
+
+    def runTest(self):
+        self.testIntegratorRK()
+
+def suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(test_RK))
+    return suite
+
+if __name__ == "__main__":
+    unittest.TextTestRunner(verbosity=2).run(suite())
diff --git a/HySoP/hysop/test/test_particular_solvers/test_euler.py b/HySoP/hysop/test/test_particular_solvers/test_euler.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d48972184ef9c028b3c67076bfa51f29d460afa
--- /dev/null
+++ b/HySoP/hysop/test/test_particular_solvers/test_euler.py
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+import unittest
+from parmepy.fields.discrete import *
+from parmepy.fields.continuous import *
+from parmepy.fields.analytical import *
+from parmepy.domain.topology import *
+from parmepy.domain.box import *
+from parmepy.constants import *
+from parmepy.particular_solvers.integrator.integrator import ODESolver
+from parmepy.particular_solvers.integrator.euler import Euler
+import numpy as np
+import numpy.testing as npt
+import math
+
+
+class test_Euler(unittest.TestCase):
+    """
+    Euler test
+    solve u'(t) = f( u(t),t )
+    u(t) = a t + U0
+    f(u,t) = a + (u - (at + U0))^4
+    """
+    def setUp(self):
+        self.e = 0.0002  # Accepted error between result and analytical result
+        self.dim = 3
+        self.boxLength = [1., 1., 1.]
+        self.boxMin = [ 0., 0., 0.]
+        self.nbPts = [6, 6, 6]
+        self.box = Box(dimension=self.dim,
+                       length=self.boxLength,
+                       origin=self.boxMin)
+        self.dt = 0.1
+        self.FinalTime = 3
+        self.U0 = [1.0 , 2.0, 3.0 ]
+
+    def testEulerInt(self):
+        # Continuous fields and operator declaration
+        self.velo = AnalyticalField(domain=self.box, formula=self.vitesse, name='Velocity', vector=True)
+        # Topology definition / Fields and operator discretization
+        self.result = [np.ones((self.nbPts), dtype=PARMES_REAL, order=ORDER) for d in xrange(self.dim)]
+        self.topo3D = CartesianTopology(domain=self.box, resolution=self.nbPts, dim=self.dim)
+        self.velo.discretize(self.topo3D)
+        self.velo.initialize()
+        self.resol = self.topo3D.mesh.resolution
+        # test for t = 0 and initialization of the function f
+        t = 0.
+        fctInter = [np.ones((self.nbPts), dtype=PARMES_REAL, order=ORDER) for d in xrange(self.dim)]
+        method = Euler()
+
+        #Solution calculated with Euler
+        fctInter = self.fctTest(t,self.velo.discreteField[self.velo._fieldId].data[0]\
+                                            ,self.velo.discreteField[self.velo._fieldId].data[1]\
+                                            ,self.velo.discreteField[self.velo._fieldId].data[2])
+        for d in xrange(self.dim) :
+            self.result[d][...] = method.integrate(self.velo.discreteField[self.velo._fieldId], fctInter, t, self.dt, d )
+
+        t=t+self.dt
+
+        #Analytical solution
+        self.anal=self.analyticalSolution(t,self.velo.discreteField[self.velo._fieldId].data[0]\
+                                            ,self.velo.discreteField[self.velo._fieldId].data[1]\
+                                            ,self.velo.discreteField[self.velo._fieldId].data[2])
+        npt.assert_array_less(abs(self.anal[0]-self.result[0]), self.e)
+        npt.assert_array_less(abs(self.anal[1]-self.result[1]), self.e)
+        npt.assert_array_less(abs(self.anal[2]-self.result[2]), self.e)
+        
+        # time loop
+        while t < self.FinalTime :
+            #print "T=" , t
+            fctInter = self.fctTest(t,self.result[0],self.result[1],self.result[2])
+            for d in xrange(self.dim) :
+                self.result[d][...] = method.integrate(self.result, fctInter, t, self.dt, d )
+
+            t = t + self.dt
+
+            self.anal=self.analyticalSolution(t,self.velo.discreteField[self.velo._fieldId].data[0]\
+                                            ,self.velo.discreteField[self.velo._fieldId].data[1]\
+                                            ,self.velo.discreteField[self.velo._fieldId].data[2])
+        # Comparison with analytical solution 
+            npt.assert_array_less(abs(self.anal[0]-self.result[0]), self.e)
+            npt.assert_array_less(abs(self.anal[1]-self.result[1]), self.e)
+            npt.assert_array_less(abs(self.anal[2]-self.result[2]), self.e)
+
+
+    def vitesse(self, x, y, z):
+        vx = 1.0
+        vy = 2.0
+        vz = 3.0
+        return vx, vy, vz
+
+    def fctTest(self, t, x, y, z):
+        wx = 0.2 +( x - self.analyticalSolution(t, 1., 2., 3.)[0])**4
+        wy = 0.5  +( y - self.analyticalSolution(t, 1., 2., 3.)[1])**4
+        wz = 1. +( z - self.analyticalSolution(t, 1., 2., 3.)[2])**4
+        return [wx,wy,wz]
+
+    def analyticalSolution(self, t, x, y, z):
+        sx = 0.2 *t + x
+        sy = 0.5 *t + y
+        sz = 1. *t + z
+        return [sx, sy, sz]
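+
+    # Along the exact trajectory x = 0.2*t + 1., y = 0.5*t + 2., z = t + 3.,
+    # the quartic terms in fctTest vanish, so f reduces to the constant
+    # slopes (0.2, 0.5, 1.) that analyticalSolution integrates.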
+
+    def runTest(self):
+        self.testEulerInt()
+
+
+def suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(test_Euler))
+    return suite
+
+if __name__ == "__main__":
+    unittest.TextTestRunner(verbosity=2).run(suite())
diff --git a/HySoP/hysop/test/test_tools/__init__.py b/HySoP/hysop/test/test_tools/__init__.py
new file mode 100755
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/HySoP/hysop/tools/__init__.pyc b/HySoP/hysop/tools/__init__.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0f0cfb9dda4a4cf1a8cbc506ed8f22c496a399cd
Binary files /dev/null and b/HySoP/hysop/tools/__init__.pyc differ
diff --git a/HySoP/hysop/tools/__pycache__/io_utils.cpython-27-PYTEST.pyc b/HySoP/hysop/tools/__pycache__/io_utils.cpython-27-PYTEST.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b26a5b9cafa823fd09fb531e0c99142dac086ae9
Binary files /dev/null and b/HySoP/hysop/tools/__pycache__/io_utils.cpython-27-PYTEST.pyc differ
diff --git a/HySoP/hysop/tools/__pycache__/remeshing_formula_parsing.cpython-27-PYTEST.pyc b/HySoP/hysop/tools/__pycache__/remeshing_formula_parsing.cpython-27-PYTEST.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2a1620ea1cf739406e8307644f104246ac051a76
Binary files /dev/null and b/HySoP/hysop/tools/__pycache__/remeshing_formula_parsing.cpython-27-PYTEST.pyc differ
diff --git a/HySoP/hysop/tools/indices.py b/HySoP/hysop/tools/indices.py
new file mode 100644
index 0000000000000000000000000000000000000000..24fcdb644a1e16ee6afc306df7145e1537841466
--- /dev/null
+++ b/HySoP/hysop/tools/indices.py
@@ -0,0 +1,34 @@
+import numpy as np
+import parmepy.tools.numpywrappers as npw
+
+
+def condition2Slice(cond):
+    dim = len(cond.shape)
+    ilist = np.where(cond)
+    if ilist[0].size == 0:
+        isEmpty = True
+        sl = [slice(0, 0) for i in xrange(dim)]
+        resol = np.asarray([0] * dim)
+    else:
+        start = np.asarray([ilist[i].min() for i in xrange(dim)])
+        end = np.asarray([ilist[i].max() + 1 for i in xrange(dim)])
+        sl = [slice(start[i], end[i])
+              for i in xrange(dim)]
+        resol = end - start
+        isEmpty = False
+
+    return sl, isEmpty, resol
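+
+# Example (a sketch): for cond = np.array([[False, True], [False, True]]),
+# np.where(cond) gives rows (0, 1) and columns (1, 1), so condition2Slice
+# returns ([slice(0, 2), slice(1, 2)], False, array([2, 1])): the bounding
+# box of the True region, an 'is empty' flag and the box resolution.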
+
+
+def removeLastPoint(cond):
+
+    shape = cond.shape
+    dim = len(shape)
+    ilist = np.where(cond)
+    end = [ilist[i].max() for i in xrange(dim)]
+    subl = [np.where(ilist[i] == end[i]) for i in xrange(dim)]
+    for sl in subl:
+        sublist = [ilist[i][sl] for i in xrange(dim)]
+        sublist = tuple(sublist)
+        cond[sublist] = False
+    return cond
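+
+# removeLastPoint switches off, along each axis, the True points of 'cond'
+# that lie on the upper face of its True bounding box (e.g. to drop a
+# duplicated periodic end point). 'cond' is modified in place and returned.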
diff --git a/HySoP/hysop/tools/io_utils.py b/HySoP/hysop/tools/io_utils.py
index 8225c2e36a8bae01002e250dd5a5a5a6bc0c5e0d..130d130896db7a0f2a30d2dcdb4102b2be536d94 100644
--- a/HySoP/hysop/tools/io_utils.py
+++ b/HySoP/hysop/tools/io_utils.py
@@ -9,14 +9,18 @@ from inspect import getouterframes, currentframe
 import parmepy.mpi as mpi
 from parmepy.tools.sys_utils import SysUtils as su
 from re import findall, IGNORECASE
+from parmepy.tools.parameters import MPI_params, IO_params
 
 
 class io(object):
     """
     Static class with utilities for file i/o
     """
+
+    _default_path = None
+
     @staticmethod
-    def defaultPath():
+    def default_path():
         """
         Get the default path for io.
         Used name if set. If not, get basename of the
@@ -31,8 +35,13 @@ class io(object):
         # inspect.getouterframes(inspect.currentframe())[-1]
         # Warning FP : the behavior of python and ipython is different for
         # this command.
+        if io._default_path is not None:
+            return io._default_path
+
         a = getouterframes(currentframe())
         ind = -1
+        interactive_path = './interactive/p' + str(mpi.main_size)
+        interactive_path = os.path.abspath(interactive_path)
         # --- ipython ---
         if su.in_ipython():
             sublist = [i[1] for i in a]
@@ -44,22 +53,32 @@ class io(object):
             if ind > -1:
                 # -- interactive ipython but call with execfile--
                 if len(findall('io_utils', a[ind][1])) > 0:
-                    return './interactive/p' + str(mpi.main_size)
+                    return interactive_path
                 a = a[ind]
             else:
                 # -- interactive ipython without execfile call --
-                return './interactive/p' + str(mpi.main_size)
+                return interactive_path
+
         else:
             # -- python --
             a = a[-1]
             if a[-1] is None or len(findall('py.test', a[1])) > 0:
                 # interactive python
-                return './interactive/p' + str(mpi.main_size)
+                return interactive_path
 
-        return os.path.join(a[1].split('.')[0], 'p' + str(mpi.main_size))
+        apath = os.path.abspath(os.path.dirname(a[1]))
+        sphinxbuild = findall('sphinx-build', a[1])
+        if len(sphinxbuild) > 0:
+            a = a[1]
+        else:
+            a = os.path.basename(a[1]).split('.')[-2]
+            if a.find('__init__') != -1:
+                return interactive_path
+        a = os.path.join(apath, a)
+        return os.path.join(a, 'p' + str(mpi.main_size))
 
     @staticmethod
-    def checkDir(filename, io_rank=0, comm=None):
+    def check_dir(filename, io_rank=0, comm=None):
         """
         Check if the directory of 'filename' exists.
         Creates it if not.
@@ -75,17 +94,29 @@ class io(object):
             if not os.path.exists(d):
                 os.makedirs(d)
 
+    @staticmethod
+    def set_default_path(pathdir):
+        """
+        To set a new default path for parmepy.
+        @param pathdir: the new path
+        Note : /pN will be appended to the path name, N being the number
+        of MPI processes used for the simulation.
+        """
+        io._default_path = pathdir
+        io._default_path = os.path.join(io._default_path,
+                                        'p' + str(mpi.main_size))
+        io.check_dir(io._default_path)
+
 
 class Writer(object):
     """
     Usage :
     \code
-    >>> import parmepy.tools.io_utils as io
-    >>> defaultPath = io.io.defaultPath()
-    >>> params = {"filename": defaultPath + 'r.dat', "writebuffshape": (1, 2)}
-    >>> wr = Writer(params)
+    >>> from parmepy.tools.parameters import IO_params
+    >>> params = IO_params(filename='r.dat')
+    >>> wr = Writer(params, buffshape=(1, 2))
     >>> ite = 3 # current iteration number
-    >>> if wr.doWrite(ite):
+    >>> if wr.do_write(ite):
     ...    wr.buffer[...] = 3.
     ...    wr.write()
     >>> wr.finalize()
@@ -93,72 +124,51 @@ class Writer(object):
     \endcode
     result : buffer is written into r.dat.
     """
-    def __init__(self, params, comm=None):
+    def __init__(self, io_params, buffshape=None, mpi_params=None,
+                 safeIO=True):
         """
-        @param params : dictionnary to set parameters
-        for output file (name, path, ...)
-        available keys are :
-        - filename : file name with full or relative path.
-        Default = data.out, in filepath.
-        If filename contains absolute path, filepath is ignored.
-        - filepath : path to the i/o file.
-        Default =  parmepy.tools.io_utils.io.defaultPath.
-        - frequency : how often output file is written. Default = 1
-        (i.e. every iteration). N means writes every N iteration and
-        0 means write only after last iteration.
-        - io_rank : mpi process rank (in comm)
-         that performs writting. Default = 0
-        - writebuffshape : shape (numpy array) of the output buffer,
-         written in filename. Default = 1.
-        - safeOutput : boolean. Default = True.
+        @param io_params : a parmepy.tools.parameters.IO_params, setup
+        for file output (name, location ...)
+        @param buffshape : shape (tuple) of the output/input buffer.
+        Must be 2D.
+        @param mpi_params : a parmepy.tools.parameters.MPI_params, mpi
+        setup (comm that owns the writer). The rank that performs the
+        writing is taken from io_params.io_leader.
+        @param safeIO : boolean. Default = True.
         True --> open/close file everytime data are written.
         False --> open at init and close during finalize. Cost less but if simu
         crashes, data are lost.
-        @param comm : mpi communicator that handles this
-        writer. Default = parmepy.mpi.main_comm.
         """
-        # Directory for output
-        if "filepath" in params:
-            filepath = params["filepath"]
-        else:
-            filepath = io.defaultPath()
-        # file name
-        if "filename" in params:
-            filename = params["filename"]
-        else:
-            filename = 'data.out'
         # # Absolute path + name for i/o file
         # # Note that if filename contains absolute path
         # # filepath is ignored
-        self.filename = os.path.join(filepath, filename)
+        msg = 'wrong type for io_params arg.'
+        assert isinstance(io_params, IO_params), msg
+        self.filename = io_params.filename
 
         ## How often i/o must occur
-        if "frequency" in params:
-            self.frequency = params["frequency"]
-        else:
-            self.frequency = 1
+        self.frequency = io_params.frequency
+
         ## Rank of the mpi process that performs i/o (if any)
-        if "io_rank" in params:
-            self.io_rank = params["io_rank"]
-        else:
-            self.io_rank = 0
+        self.io_rank = io_params.io_leader
 
-        if comm is None:
-            comm = mpi.main_comm
         ## A reference communicator, just to identify a
         ## process rank for io.
-        self.comm = comm
+        if mpi_params is None:
+            mpi_params = MPI_params()
+        self.mpis = mpi_params
 
         # check if output dir exists, create it if not.
-        io.checkDir(self.filename, self.io_rank, self.comm)
+        io.check_dir(self.filename, self.io_rank, self.mpis.comm)
 
         ## Shape of the output buffer (must be a 2D numpy array)
-        if "writebuffshape" in params:
-            self.buffshape = params["writebuffshape"]
-        else:
-            self.buffshape = (1, 1)
+        if buffshape is None:
+            buffshape = (1, 1)
+        self.buffshape = buffshape
         assert len(self.buffshape) == 2,\
-            '2D shape required : set writebuffshape: (x,y)'
+            '2D shape required : set arg buffshape as a 2D tuple: (x,y)'
+
         ## The buffer (numpy array) that will be printed to a file
         self.buffer = npw.zeros(self.buffshape)
 
@@ -169,42 +179,127 @@ class Writer(object):
         ## False --> open at init and close
         ## during finalize. Cost less but if simu
         ## crashes, data are lost.
-        if "safeOutput" in params:
-            self.safeOutput = params["safeOutput"]
-        else:
-            self.safeOutput = True
-
-        if self.safeOutput:
+        if safeIO:
             self.write = self._fullwrite
         else:
             self.write = self._partialwrite
         # Force synchro to be sure that all output dirs
         # have been created.
-        self.comm.barrier()
-        if self.comm.Get_rank() == self.io_rank:
+        self.mpis.comm.barrier()
+        if self.mpis.rank == self.io_rank:
             self._file = open(self.filename, 'w')
 
-    def doWrite(self, ite):
+    def do_write(self, ite):
         """
         True if output is required
         for iteration ite
         @param ite : current iteration number
         """
-        rank = self.comm.Get_rank()
         num = ite + 1  # Number of iterations done
-        return rank == self.io_rank and (num % self.frequency) == 0
+        return self.mpis.rank == self.io_rank and (num % self.frequency) == 0
 
     def _fullwrite(self):
-        if self.comm.Get_rank() == self.io_rank:
-            self._file = open(self.filename, 'a')
-            ft.write(self._file, self.buffer)
-            self._file.close()
+        self._file = open(self.filename, 'a')
+        ft.write(self._file, self.buffer)
+        self._file.close()
 
     def _partialwrite(self):
-        if self.comm.Get_rank() == self.io_rank:
-            ft.write(self._file, self.buffer)
+        ft.write(self._file, self.buffer)
 
     def finalize(self):
-        if self.comm.Get_rank() == self.io_rank:
+        if self.mpis.rank == self.io_rank:
             if not self._file.closed:
                 self._file.close()
+
+    def __str__(self):
+        if self.mpis.rank == self.io_rank:
+            s = ' === Writer === \n'
+            s += ' - filename = ' + self.filename
+            s += '\n - buffshape = ' + str(self.buffshape)
+            s += '\n - frequency = ' + str(self.frequency)
+            return s
+        # Return an empty string (rather than None) on non-io ranks,
+        # so that printing the writer always works.
+        return ''
+
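+# Usage sketch (illustrative, not part of the module): drive the Writer
+# defined above with an IO_params from parmepy.tools.parameters. The
+# file name, frequency and buffer values are placeholders.
+#
+#     iop = IO_params('drag.dat', frequency=10)
+#     wr = Writer(iop, buffshape=(1, 2))
+#     for ite in range(100):
+#         if wr.do_write(ite):
+#             wr.buffer[0, 0] = ite * 0.01   # e.g. current time
+#             wr.buffer[0, 1] = 0.6726       # e.g. current drag value
+#             wr.write()
+#     wr.finalize()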
+
+class XMF(object):
+    """
+    Static class used to define xmf tools.
+    """
+    @staticmethod
+    def _list_format(l):
+        """
+        Format a list to the xml output.
+        Removes the '[]()' and replace ',' with ' ' in default str.
+        @param l : list to format
+        """
+        buff = str(l).replace(',', ' ').replace('[', '')
+        return buff.replace(']', '').replace('(', '').replace(')', '')
+
+    @staticmethod
+    def write_grid_attributes(topo, datasetNames, ite,
+                              time, filename, subset=None):
+        """
+        Write XDMF header into a file
+        @param[in] topo: a parmepy.mpi.topology.Cartesian, used as reference
+        to define local and global meshes in xdmf file.
+        @param[in] datasetNames: a list of datasets names
+        @param[in] ite: iteration number
+        @param[in] time: current time
+        @param[in] filename: name of the hdf file which contains datas
+        for the current process.
+        @param[in] subset: optional, to define a grid only on
+        this subset. If None, grid on the whole domain (from topo)
+        @return: a string which contains the xml-like header.
+        """
+
+        # The header (xml-like), saved in a string.
+        xml_grid = ""
+        dimension = topo.domain.dimension
+        if dimension == 2:
+            topoType = "2DCORECTMesh"
+            geoType = "ORIGIN_DXDY"
+        elif dimension == 3:
+            topoType = "3DCORECTMesh"
+            geoType = "ORIGIN_DXDYDZ"
+        xml_grid += "   <Grid Name=\"Iteration {0:03d}\"".format(ite)
+        xml_grid += " GridType=\"Uniform\">\n"
+        xml_grid += "    <Time Value=\"{0}\" />\n".format(time)
+        xml_grid += "    <Topology TopologyType=\"" + str(topoType) + "\""
+        xml_grid += " NumberOfElements=\""
+
+        # Check subset to find the required grid resolution
+        if subset is not None:
+            resolution = list(subset.mesh[topo].global_resolution())
+            origin = list(subset.real_orig[topo])
+        else:
+            resolution = list(topo.mesh.global_resolution())
+            origin = list(topo.domain.origin)
+        resolution.reverse()
+        origin.reverse()
+        xml_grid += XMF._list_format(resolution) + " \"/>\n"
+        xml_grid += "    <Geometry GeometryType=\"" + geoType + "\">\n"
+        xml_grid += "     <DataItem Dimensions=\"" + str(dimension) + " \""
+        xml_grid += " NumberType=\"Float\" Precision=\"8\" Format=\"XML\">\n"
+        xml_grid += "     " + XMF._list_format(origin) + "\n"
+        xml_grid += "     </DataItem>\n"
+        xml_grid += "     <DataItem Dimensions=\"" + str(dimension) + " \""
+        xml_grid += " NumberType=\"Float\" Precision=\"8\" Format=\"XML\">\n"
+        step = list(topo.mesh.space_step)
+        step.reverse()
+        xml_grid += "     " + XMF._list_format(step) + "\n"
+        xml_grid += "     </DataItem>\n"
+        xml_grid += "    </Geometry>\n"
+        # Append dataset parameters
+        for name in datasetNames:
+            xml_grid += "    <Attribute Name=\""
+            xml_grid += name + "\""
+            xml_grid += " AttributeType=\"Scalar\" Center=\"Node\">\n"
+            xml_grid += "     <DataItem Dimensions=\""
+            xml_grid += XMF._list_format(resolution) + " \""
+            xml_grid += " NumberType=\"Float\" Precision=\"8\" Format=\"HDF\""
+            xml_grid += " Compression=\"Raw\">\n"
+            xml_grid += "      " + filename.split('/')[-1]
+            xml_grid += ":/" + name
+            xml_grid += "\n     </DataItem>\n"
+            xml_grid += "    </Attribute>\n"
+        xml_grid += "   </Grid>\n"
+        return xml_grid
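+
+# Assembly sketch (illustrative): wrap the string returned by
+# write_grid_attributes with an xdmf header/footer to obtain a complete
+# .xmf file; 'topo' is assumed to be an existing
+# parmepy.mpi.topology.Cartesian instance.
+#
+#     header = "<?xml version=\"1.0\" ?>\n<Xdmf Version=\"2.0\">\n <Domain>\n"
+#     grid = XMF.write_grid_attributes(topo, ['vorticity'], 0, 0.0,
+#                                      'fields_00000.h5')
+#     with open('fields_00000.xmf', 'w') as f:
+#         f.write(header + grid + " </Domain>\n</Xdmf>\n")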
diff --git a/HySoP/hysop/tools/io_utils.pyc b/HySoP/hysop/tools/io_utils.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ef87342118406705fb70e5592e637dc2b5d48e9a
Binary files /dev/null and b/HySoP/hysop/tools/io_utils.pyc differ
diff --git a/HySoP/hysop/tools/numpywrappers.py b/HySoP/hysop/tools/numpywrappers.py
index 333d6527dcca5e41ad1c6889b80f56c3488c7adc..7e1bfa29383b9e2fe92383436dba401bf9024c81 100644
--- a/HySoP/hysop/tools/numpywrappers.py
+++ b/HySoP/hysop/tools/numpywrappers.py
@@ -4,104 +4,177 @@
 
 Tools to build numpy arrays based on parmepy setup (float type ...)
 """
-from parmepy.constants import PARMES_REAL, ORDER, PARMES_INTEGER, PARMES_INDEX
+from parmepy.constants import PARMES_REAL, ORDER, PARMES_INTEGER,\
+    PARMES_DIM
 import numpy as np
 import scitools.filetable as ft
+bool = np.bool
 
 
 def zeros(shape, dtype=PARMES_REAL):
     """
-    @param shape : tuple of int
-    @return np.zeros(...)
-    Creates a numpy array of shape 'shape', handling
-    data of type parmepy.constants.PARMES_REAL using parmepy.constants.ORDER.
+    Wrapper to numpy.zeros, force order to parmepy.constants.ORDER
     """
     return np.zeros(shape, dtype=dtype, order=ORDER)
 
 
 def ones(shape, dtype=PARMES_REAL):
     """
-    @param shape : tuple of int
-    @return np.ones(...)
-    Creates a numpy array of shape 'shape', handling
-    data of type parmepy.constants.PARMES_REAL using parmepy.constants.ORDER.
+    Wrapper to numpy.ones, force order to parmepy.constants.ORDER
     """
     return np.ones(shape, dtype=dtype, order=ORDER)
 
 
 def zeros_like(tab):
     """
-    @param tab : a numpy array
-    @return np.zeros_like(...) an array of the same shape as tab, filled with
-    zeros.
-    Creates a numpy array of shape tab.shape, handling
-    data of type parmepy.constants.PARMES_REAL using parmepy.constants.ORDER.
+    Wrapper to numpy.zeros_like, force order to parmepy.constants.ORDER
     """
     return np.zeros_like(tab, dtype=tab.dtype, order=ORDER)
 
 
+def empty_like(tab):
+    """
+    Wrapper to numpy.empty_like, force order to parmepy.constants.ORDER
+    """
+    return np.empty_like(tab, dtype=tab.dtype, order=ORDER)
+
+
 def copy(tab):
     """
-    @param tab : a numpy array
-    @return a copy of tab with the same ordering type (fortran or C) as tab.
+    Wrapper to numpy.copy, ensure the same ordering in copy.
     """
     return tab.copy(order='A')
 
 
 def asarray(tab):
     """
-    @param tab : a numpy array
-    @return a numpy array in the Parmes
-    predefined ordering type (fortran or C).
+    Wrapper to numpy.asarray, force order to parmepy.constants.ORDER
     """
     return np.asarray(tab, order=ORDER, dtype=tab.dtype)
 
 
-def realarray(tab):
+def asrealarray(tab):
     """
-    @param tab : a numpy array
-    @return a numpy array in the Parmes of real,
-    precision set by PARMES_REAL,
-    predefined ordering type (fortran or C)
+    Wrapper to numpy.asarray, force order to parmepy.constants.ORDER
+    and type to parmepy.constants.PARMES_REAL
     """
     return np.asarray(tab, order=ORDER, dtype=PARMES_REAL)
 
 
-def indexarray(tab):
+def const_realarray(tab):
+    """
+    Wrapper to numpy.asarray, force order to parmepy.constants.ORDER
+    and type to parmepy.constants.PARMES_REAL.
+    Forbid any later change in the content of the array.
+    """
+    tmp = np.asarray(tab, order=ORDER, dtype=PARMES_REAL)
+    tmp.flags.writeable = False
+    return tmp
+
+
+def const_dimarray(tab):
     """
-    return an array of int, int type define by PARMES_INDEX
+    Wrapper to numpy.asarray, force order to parmepy.constants.ORDER
+    and type to parmepy.constants.PARMES_DIM.
+    Forbid any later change in the content of the array.
     """
-    return np.asarray(tab, order=ORDER, dtype=PARMES_INDEX)
+    tmp = np.asarray(tab, order=ORDER, dtype=PARMES_DIM)
+    tmp.flags.writeable = False
+    return tmp
 
 
-def integerarray(tab):
+def asintarray(tab):
     """
-    return an array of int, int type define by PARMES_INDEX
+    Wrapper to numpy.asarray, force order to parmepy.constants.ORDER
+    and type to parmepy.constants.PARMES_INTEGER.
     """
     return np.asarray(tab, order=ORDER, dtype=PARMES_INTEGER)
 
 
+def int_zeros(shape):
+    """
+    Wrapper to numpy.zeros, force order to parmepy.constants.ORDER
+    and type to parmepy.constants.PARMES_INTEGER.
+    """
+    return np.zeros(shape, order=ORDER, dtype=PARMES_INTEGER)
+
+
+def int_ones(shape):
+    """
+    Wrapper to numpy.ones, force order to parmepy.constants.ORDER
+    and type to parmepy.constants.PARMES_INTEGER.
+    """
+    return np.ones(shape, order=ORDER, dtype=PARMES_INTEGER)
+
+
+def asdimarray(tab):
+    """
+    Wrapper to numpy.asarray, force order to parmepy.constants.ORDER
+    and type to parmepy.constants.PARMES_DIM.
+    """
+    return np.asarray(tab, order=ORDER, dtype=PARMES_DIM)
+
+
+def asboolarray(tab):
+    """
+    Wrapper to numpy.asarray, force order to parmepy.constants.ORDER
+    and type to np.bool.
+    """
+    return np.asarray(tab, order=ORDER, dtype=np.bool)
+
+
+def dim_ones(shape):
+    """
+    Wrapper to numpy.ones, force order to parmepy.constants.ORDER
+    and type to parmepy.constants.PARMES_INTEGER.
+    """
+    return np.ones(shape, order=ORDER, dtype=PARMES_DIM)
+
+
+def dim_zeros(shape):
+    """
+    Wrapper to numpy.zeros, force order to parmepy.constants.ORDER
+    and type to parmepy.constants.PARMES_DIM.
+    """
+    return np.zeros(shape, order=ORDER, dtype=PARMES_DIM)
+
+
+def equal(a, b):
+    """
+    Wrapper to numpy.equal; both arguments must have the same dtype.
+    """
+    msg = 'You try to compare two values of different '
+    msg += 'types : ' + str(np.asarray(a).dtype) + ' and '
+    msg += str(np.asarray(b).dtype) + '.'
+    assert np.asarray(a).dtype == np.asarray(b).dtype, msg
+    return np.equal(a, b)
+
+
 def abs(tab):
     """
-    @param tab : a numpy array
-    @return a numpy array in the Parmes
-    predefined ordering type (fortran or C), equal to abs(tab)
+    Wrapper to numpy.abs, force order to parmepy.constants.ORDER
     """
     return np.abs(tab, order=ORDER, dtype=tab.dtype)
 
 
-def sum(tab, dtype=PARMES_REAL):
+def real_sum(tab):
     """
+    Wrapper to numpy.sum, force type to parmepy.constants.PARMES_REAL.
     """
-    return np.sum(tab, dtype=dtype)
+    return np.sum(tab, dtype=PARMES_REAL)
 
 
 def prod(tab, dtype=PARMES_REAL):
     """
+    Wrapper to numpy.prod
     """
     return np.prod(tab, dtype=dtype)
 
 
+def add(a, b, c, dtype=PARMES_REAL):
+    """
+    Wrapper to numpy.add, with c used as the output array (c = a + b).
+    """
+    return np.add(a, b, c, dtype=dtype)
+
+
 def writeToFile(fname, data, mode='a'):
     """
     write data (numpy array) to file fname
@@ -109,3 +182,17 @@ def writeToFile(fname, data, mode='a'):
     fout = open(fname, mode)
     ft.write(fout, data)
     fout.close()
+
+
+def lock(tab):
+    """
+    Set tab as a non-writeable array
+    """
+    tab.flags.writeable = False
+
+
+def unlock(tab):
+    """
+    Set tab as a writeable array
+    """
+    tab.flags.writeable = True
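+
+
+# Usage sketch (illustrative): lock/unlock toggle the numpy 'writeable'
+# flag, so any write to a locked array raises ValueError.
+#
+#     a = zeros((2, 2))
+#     lock(a)
+#     # a[0, 0] = 1.  -> ValueError: assignment destination is read-only
+#     unlock(a)
+#     a[0, 0] = 1.    # allowed again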
diff --git a/HySoP/hysop/tools/numpywrappers.pyc b/HySoP/hysop/tools/numpywrappers.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3fa317997d88f6cbf562e2c384fb86870bbad90d
Binary files /dev/null and b/HySoP/hysop/tools/numpywrappers.pyc differ
diff --git a/HySoP/hysop/tools/parameters.py b/HySoP/hysop/tools/parameters.py
new file mode 100644
index 0000000000000000000000000000000000000000..5afd8229c4571358d600f8d88d64572c896e04df
--- /dev/null
+++ b/HySoP/hysop/tools/parameters.py
@@ -0,0 +1,118 @@
+"""
+@file parameters.py
+
+Light classes to handle parameters for classes construction.
+"""
+
+from collections import namedtuple
+from parmepy.mpi.main_var import main_comm, main_rank
+from parmepy.constants import DEFAULT_TASK_ID
+
+
+class MPI_params(namedtuple('MPI_params', ['comm', 'task_id',
+                                           'rank', 'onTask'])):
+    """
+    Struct to save mpi parameters :
+    - comm : parent mpi communicator (default = main_comm)
+    - task_id : id of the task that owns this object
+    (default = DEFAULT_TASK_ID)
+    - rank of the current process in comm
+    - onTask : true if the task_id of the object corresponds
+    to the task_id of the current process.
+
+    This struct is useful for operators : each operator has
+    a MPI_params attribute to save its mpi settings.
+
+    Example:
+
+    op = SomeOperator(..., task_id=1)
+    if op.isOnTask():
+       ...
+
+    'isOnTask' will return MPI_params.onTask value for op
+    and tell if the current operator belongs to the current process
+    mpi task.
+    """
+    def __new__(cls, comm=main_comm, task_id=DEFAULT_TASK_ID,
+                rank=main_rank, onTask=True):
+        # Whatever is passed in, rank is recomputed from comm.
+        rank = comm.Get_rank()
+        return super(MPI_params, cls).__new__(cls, comm, task_id, rank, onTask)
+
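+# Usage sketch (illustrative): mpi parameters for an operator attached
+# to task 1; note that rank is always recomputed from the given
+# communicator, whatever value is passed in.
+#
+#     mp = MPI_params(comm=main_comm, task_id=1)
+#     print (mp.rank, mp.task_id, mp.onTask)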
+
+import parmepy.tools.numpywrappers as npw
+
+
+class Discretization(namedtuple("Discretization", ['resolution', 'ghosts'])):
+    """
+    A struct to handle discretization parameters:
+    - a resolution (either a list of int or a numpy array of int)
+    - number of points in the ghost-layer. One value per direction, list
+    or array. Default = None.
+    """
+    def __new__(cls, resolution, ghosts=None):
+        resolution = npw.asdimarray(resolution)
+        if ghosts is not None:
+            ghosts = npw.asdimarray(ghosts)
+            msg = 'Dimensions of resolution and ghosts parameters'
+            msg += ' are not compliant.'
+            assert ghosts.size == resolution.size, msg
+            assert all(ghosts >= 0)
+        else:
+            ghosts = npw.dim_zeros(resolution.size)
+        return super(Discretization, cls).__new__(cls, resolution, ghosts)
+
+    def __eq__(self, other):
+        if self.__class__ != other.__class__:
+            return False
+        return (self.resolution == other.resolution).all() and\
+            (self.ghosts == other.ghosts).all()
+
+    def __ne__(self, other):
+        result = self.__eq__(other)
+        if result is NotImplemented:
+            return result
+        return not result
+
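+# Usage sketch (illustrative): two Discretization instances compare
+# equal iff both resolution and ghosts match element-wise.
+#
+#     d1 = Discretization([65, 65, 65], ghosts=[2, 2, 2])
+#     d2 = Discretization([65, 65, 65], ghosts=[2, 2, 2])
+#     d3 = Discretization([65, 65, 65])   # ghosts default to zero
+#     assert d1 == d2 and d1 != d3
+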
+from parmepy.constants import ASCII
+import os
+
+
+class IO_params(namedtuple("IO_params", ['filename', 'filepath',
+                                         'frequency', 'fileformat',
+                                         'io_leader'])):
+    """
+    A struct to handle I/O files parameters:
+    - name of the file
+    - location of the file
+    - frequency of output or input (e.g. every N times steps)
+    - format of the file (e.g. hdf5, ascii ...)
+    - rank of the process that leads i/o (io_leader)
+    """
+    def __new__(cls, filename, filepath=None, frequency=1,
+                fileformat=None, io_leader=0):
+        import parmepy.tools.io_utils as io
+
+        # Filename is absolute path, filepath arg is ignored.
+        if os.path.isabs(filename):
+            filepath = os.path.dirname(filename)
+
+        else:
+            if filepath is not None:
+                filename = os.path.join(filepath, filename)
+                filepath = os.path.abspath(os.path.dirname(filename))
+            else:
+                filepath = os.path.dirname(filename)
+                if filepath == '':
+                    # Get default output path
+                    filepath = io.io.default_path()
+                    filename = os.path.join(filepath, filename)
+                else:
+                    filepath = os.path.abspath(filepath)
+                    filename = os.path.join(filepath,
+                                            os.path.basename(filename))
+        if fileformat is None:
+            fileformat = ASCII
+
+        io.io.check_dir(filename)
+#        filepath = os.path.dirname(filename)
+        return super(IO_params, cls).__new__(cls, filename, filepath,
+                                             frequency, fileformat, io_leader)
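+
+# Usage sketch (illustrative), mirroring the tests in
+# tests/test_parameters.py: a relative name plus an explicit path is
+# joined and made absolute, while a bare name falls back to the default
+# output path.
+#
+#     iop = IO_params('toto.h5', filepath='./test')
+#     iop2 = IO_params('data.out', frequency=10)
+#     print (iop.filename, iop.filepath, iop2.frequency)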
diff --git a/HySoP/hysop/tools/parameters.pyc b/HySoP/hysop/tools/parameters.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..896294cd68e405499496dc5102fe89bf99015950
Binary files /dev/null and b/HySoP/hysop/tools/parameters.pyc differ
diff --git a/HySoP/hysop/tools/plotDrag.py b/HySoP/hysop/tools/plotDrag.py
new file mode 100644
index 0000000000000000000000000000000000000000..58a49e065c6f2b7ac1a825db0800d29d12d6eabf
--- /dev/null
+++ b/HySoP/hysop/tools/plotDrag.py
@@ -0,0 +1,40 @@
+import scitools.easyviz as sea
+import numpy as np
+import scitools.filetable as ft
+import matplotlib.pyplot as plt
+
+# Lambda comparison
+# Results in Softs/MethodesParticulaires/Resultats_simu/Comp_lmanda
+fileDt = ('drag129_fixe', 'drag129_var', 'drag65_fixe', 'drag65_var')
+fileListLayer = ('drag_01', 'drag_02', 'drag_03')
+fileListLambda = ('drag_05', 'drag_06', 'drag_07', 'drag_09', 'drag_11')
+fileListLambda2 = ('d129_4', 'd129_5', 'drag_06', 'd129_7', 'd129_12')
+fileListLambda3 = ('d129_5', 'd257_5')  # ,('d257_7'))
+fileListLambda4 = ('drag_06', 'd257_6')  # ,('d257_7'))
+
+legendLayer = ('layer=0.1', 'layer=0.2', 'layer=0.3')
+legendLambda = ('lambda=1e5', 'lambda=1e6', 'lambda=1e7', 'lambda=1e9',
+                'lambda=1e11', 'Ref from Folke : 0.6726')
+legendLambda2 = ('lambda=1e4', 'lambda=1e5', 'lambda=1e6', 'lambda=1e7',
+                 'lambda=1e12', 'Ref from Folke : 0.6726')
+legendLambda3 = ('lambda=1e5', '257 - lambda=1e5', 'Ref from Folke : 0.6726')
+legendLambda4 = ('lambda=1e6', '257 - lambda=1e6', 'Ref from Folke : 0.6726')
+plt.hold('off')
+plt.xlabel('time')
+plt.hold('on')
+plt.ylabel('drag')
+plt.axis([0,70,0.3,1])
+plt.grid('on')
+
+for filename in fileListLambda3:
+    print ("my file is ", filename)
+    datafile = open(filename)
+    table = ft.read(datafile)
+    time = table[:, 0]
+    drag = table[:, 1]
+    datafile.close()
+    plt.plot(time, drag, '--')
+plt.axhline(y=0.6726,xmin=0,xmax=22,color='r')
+plt.legend(legendLambda3)
+#plt.hold('on')
+
+plt.savefig('DragRe133_CompLambda3.pdf')
+plt.show()
diff --git a/HySoP/hysop/tools/sys_utils.pyc b/HySoP/hysop/tools/sys_utils.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f143dd4428aa22e45fc29c6ce92d1d98ddd59d0b
Binary files /dev/null and b/HySoP/hysop/tools/sys_utils.pyc differ
diff --git a/HySoP/hysop/tools/tests/test_parameters.py b/HySoP/hysop/tools/tests/test_parameters.py
new file mode 100644
index 0000000000000000000000000000000000000000..c886d4ba531588029a4554d9444b27d81d4f9dc6
--- /dev/null
+++ b/HySoP/hysop/tools/tests/test_parameters.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+"""
+Tests for parmepy parameters-like variables.
+"""
+
+from parmepy.tools.parameters import IO_params
+from parmepy.tools.io_utils import io
+import os
+
+
+# IO params
+def test_io_params_1():
+    filename = 'toto.h5'
+    dirname = './test/'
+    iop = IO_params(filename, dirname)
+
+    realdirname = os.path.abspath('./test/toto.h5')
+    assert os.path.abspath(iop.filename) == realdirname
+    assert iop.filepath == os.path.dirname(realdirname)
+
+
+def test_io_params_2():
+    filename = 'toto.h5'
+    iop = IO_params(filename)
+    def_path = io.default_path()
+    realdirname = os.path.join(def_path, filename)
+    assert iop.filename == realdirname
+    assert iop.filepath == os.path.dirname(realdirname)
+
+
+def test_io_params_3():
+    filename = './test/toto.h5'
+    iop = IO_params(filename)
+    realdirname = os.path.abspath('./test/toto.h5')
+    assert iop.filename == realdirname
+    assert iop.filepath == os.path.dirname(realdirname)
+
+
+def test_io_params_4():
+    filename = './test/toto.h5'
+    dirname = './test2/'
+    iop = IO_params(filename, dirname)
+
+    realdirname = os.path.abspath('./test2/./test/toto.h5')
+    assert os.path.abspath(iop.filename) == realdirname
+    assert iop.filepath == os.path.dirname(realdirname)
+
+
+def test_io_params_5():
+    filename = '/tmp/test/toto.h5'
+    iop = IO_params(filename)
+    assert iop.filename == filename
+    assert iop.filepath == os.path.dirname(filename)
+
+
+def test_io_params_6():
+    filename = '/tmp/toto.h5'
+    dirname = '/test2/tmp'
+    iop = IO_params(filename, dirname)
+    assert iop.filename == filename
+    assert iop.filepath == os.path.dirname(filename)
+
+
+if __name__ == "__main__":
+    # test 1 is enough ...
+    test_io_params_1()
diff --git a/HySoP/hysop/tools/tests/test_timers.py b/HySoP/hysop/tools/tests/test_timers.py
new file mode 100644
index 0000000000000000000000000000000000000000..43ab6691887e3a624392c055488be61c0afc7ec7
--- /dev/null
+++ b/HySoP/hysop/tools/tests/test_timers.py
@@ -0,0 +1,44 @@
+"""
+Unitary tests for parmepy.tools.timers module
+"""
+from parmepy.tools.timers import Timer, timed_function
+
+
+class A_class(object):
+    def __init__(self):
+        self.name = 'A_class'
+        self.timer = Timer(self)
+        self.n = 0
+
+    @timed_function
+    def call(self):
+        self.n += 1
+
+    @timed_function
+    def call_other(self):
+        self.n += 10
+
+
+def test_timer_from_decorator():
+    a = A_class()
+    assert a.n == 0
+    a.call()
+    assert len(a.timer.f_timers.keys()) == 1
+    fun1 = a.timer.f_timers.keys()[0]
+    assert a.n == 1  # the call function has been called
+    assert a.timer.f_timers[fun1].ncalls == 1
+    #assert a.timer.f_timers[fun1].t == a.timer.f_timers[fun1].times[0]
+    a.call()
+    a.call_other()
+    assert len(a.timer.f_timers.keys()) == 2
+    fun2 = [f for f in a.timer.f_timers.keys() if f != fun1][0]
+    assert a.n == 12  # the call and call_other functions have been called
+    assert a.timer.f_timers[fun1].ncalls == 2
+    #assert a.timer.f_timers[fun1].t == \
+    #    a.timer.f_timers[fun1].times[0] + a.timer.f_timers[fun1].times[1]
+    assert a.timer.f_timers[fun2].ncalls == 1
+    #assert a.timer.f_timers[fun2].t == a.timer.f_timers[fun2].times[0]
+
+
+if __name__ == '__main__':
+    test_timer_from_decorator()
diff --git a/HySoP/hysop/tools/timers.py b/HySoP/hysop/tools/timers.py
new file mode 100644
index 0000000000000000000000000000000000000000..de2b5167ff79e1cea308f7e34618b6848d0ece47
--- /dev/null
+++ b/HySoP/hysop/tools/timers.py
@@ -0,0 +1,194 @@
+"""
+@file timers.py
+
+Contains classes for monitoring computational time in a non-intrusive way,
+thanks to decorators.
+"""
+from parmepy.mpi import MPI, main_rank
+from parmepy import __VERBOSE__
+ftime = MPI.Wtime
+
+
+def timed_function(f):
+    """
+    Decorator for function timing. Get the corresponding function timer in the
+    operator (represented in args[0]).
+
+    @remark : Works only with methods that belong to an object
+    with a Timer member named 'timer'.
+    """
+    def wrapper(*args, **kwargs):
+        f_timer = args[0].timer.getFunctionTimer(f)
+        res = f_timer(f)(*args, **kwargs)
+        return res
+    # def wrapper(*args, **kwargs):
+    #     return f(*args, **kwargs)
+    return wrapper
+
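+# Usage sketch (illustrative): the decorated method's owner must expose
+# a 'name' attribute and a Timer instance named 'timer' (see the Timer
+# class below).
+#
+#     class Op(object):
+#         def __init__(self):
+#             self.name = 'Op'
+#             self.timer = Timer(self)
+#
+#         @timed_function
+#         def apply(self):
+#             return sum(range(1000))
+#
+#     op = Op()
+#     op.apply()
+#     print (op.timer)   # cumulated time and call counts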
+
+class FunctionTimer(object):
+    """
+    Class for timing functions with the timed_function decorator.
+
+    A FunctionTimer is instantiated with a given function. Calling the timer
+    returns the function wrapped with execution time measurement.
+    """
+
+    def __init__(self, func):
+        """
+        Creates a timer for the given function.
+
+        @param func : function to time.
+        """
+        ## Timed function name
+        self.func_name = func.func_name
+        ## Total time spent in the function (in seconds).
+        self.t = 0.
+        # ## Times per calls
+        # self.times = []
+        ## Calls number
+        self.ncalls = 0
+
+    def __call__(self, func):
+        """
+        Wrap the given function with timing instructions,
+        based on the MPI.Wtime function.
+        It returns the wrapped function.
+        @param func function to time.
+        @return wrapped function.
+        """
+        def f(*args, **kwargs):
+            t0 = ftime()
+            res = func(*args, **kwargs)
+            t = ftime() - t0
+            self.t += t
+            self.ncalls += 1
+            #self.times.append(t)
+            if __VERBOSE__:
+                print (args[0].__class__.__name__, ' -- ',)
+                print (self.func_name, " : ", t, 's')
+            return res
+        return f
+
+    def __str__(self):
+        s = self.func_name
+        s += ' : ' + str(self.t) + " s  ({0} calls)".format(self.ncalls)
+        return s
+
+
+class ManualFunctionTimer(FunctionTimer):
+    """
+    Class for manually timing some code. For instance, this class is designed
+    for monitoring OpenCL kernels, where the duration of the function call
+    does not reflect the actual computational time.
+    """
+    def __init__(self, name):
+        def fun():
+            pass
+        fun.func_name = name
+        FunctionTimer.__init__(self, fun)
+
+    def append_time(self, t):
+        """Manual computational time append"""
+        self.t += t
+        self.ncalls += 1
+        #self.times.append(t)
+        if __VERBOSE__:
+            print (self.func_name, " : ", t, 's')
+
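+# Usage sketch (illustrative): time a region by hand, e.g. around an
+# OpenCL kernel launch, then attach the result to an existing Timer
+# with addFunctionTimer (defined below).
+#
+#     mt = ManualFunctionTimer('gpu_kernel')
+#     t0 = ftime()
+#     # ... enqueue the kernel and wait for completion ...
+#     mt.append_time(ftime() - t0)
+#     some_timer.addFunctionTimer(mt)
+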
+
+class Timer(object):
+    """
+    Manages a dictionary of FunctionTimer objects for monitoring functions
+    member of the given operator.
+    """
+
+    def __init__(self, op, suffix=''):
+        """
+        Creates a Timer.
+        @param op : Timer owner.
+        """
+        ## Parent object (owner of this timer)
+        self._obj = op
+        self.name = self._obj.name + suffix
+        ## FunctionTimer dictionary with functions or operators as keys.
+        self.f_timers = {}
+        ## Total time spent in the operator (in seconds).
+        self.t = 0.
+        self._isEmpty = True
+
+    def __add__(self, t):
+        """
+        Overrides operator +.
+        @param t : Other Timer object
+        """
+        t.compute_summary()
+        #self.t += t.t
+        self._isEmpty = False
+        self.f_timers[t] = t
+        return self
+
+    def getFunctionTimer(self, func):
+        """
+        Get the FunctionTimer related to the given function,
+        creating it if it does not exist.
+
+        @param func : Function to get the corresponding timer.
+        @return : FunctionTimer related to func.
+        """
+        try:
+            return self.f_timers[func]
+        except KeyError:
+            self.f_timers[func] = FunctionTimer(func)
+            return self.f_timers[func]
+
+    def addFunctionTimer(self, ft):
+        """
+        Add a function timer to Timer.
+
+        @param ft : FunctionTimer to add.
+        """
+        self.f_timers[ft] = ft
+
+    def addSubTimer(self, t, prefix=''):
+        if len(t.f_timers.keys()) > 0:
+            t.compute_summary()
+            t.name += prefix
+            self.f_timers[t] = t
+
+    def compute_summary(self):
+        """
+        Compute a summary of the different functions referenced herein.
+        """
+        self.t = 0.
+        for f in self.f_timers.keys():
+            # if self.f_timers[f] is a Timer, these times are already
+            # in the current Timer sum.
+            if not isinstance(self.f_timers[f], Timer):
+                self.t += self.f_timers[f].t
+                self._isEmpty = False
+
+    def rTimes(self):
+        s = ""
+        for f in sorted(self.f_timers.keys()):
+            nl = "@@F@@[" + str(main_rank) + "]@@S@@"
+            if isinstance(self.f_timers[f], Timer):
+                if len(self.f_timers[f].f_timers) > 0:
+                    s += nl + '     |-- ' + f.name
+                    if self.f_timers[f].t > 0.:
+                        s += " : " + str(self.f_timers[f].t)
+                    subTimer = self.f_timers[f]
+                    s += subTimer.rTimes().replace('@@S@@', '@@S@@    ')
+            else:
+                s += nl + '     |-- ' + str(self.f_timers[f])
+        return s
+
+    def __str__(self):
+        self.compute_summary()
+        if not self._isEmpty:
+            s = "[" + str(main_rank) + "] Time spent in "
+            s += self.name + ' = ' + str(self.t) + ' s'
+            s += self.rTimes().replace('@@F@@', '\n').replace('@@S@@', '')
+        else:
+            s = ""
+        return s
diff --git a/HySoP/hysop/tools/timers.pyc b/HySoP/hysop/tools/timers.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f5ca03ecbe41504d8685c494eec355798c5850df
Binary files /dev/null and b/HySoP/hysop/tools/timers.pyc differ
diff --git a/HySoP/src/#CMakeLists.txt# b/HySoP/src/#CMakeLists.txt#
new file mode 100644
index 0000000000000000000000000000000000000000..798019c8d518abb160e58b86eaabdab135331522
--- /dev/null
+++ b/HySoP/src/#CMakeLists.txt#
@@ -0,0 +1,233 @@
+#=======================================================
+# cmake utility to compile, link and install :
+#  - parmes library (libparmes...)
+#  - the particle solver library from scales
+#
+#=======================================================
+
+# --- Set a version number for the package ---
+set(${PARMES_LIBRARY_NAME}_version 1.0.0)
+
+# This exe will be linked with libPARMES_LIBRARY_NAME
+set(EXE_NAME ${PARMES_LIBRARY_NAME}Run)
+
+# The list of all dirs containing sources to be compiled for the Parmes lib
+# Any file in those dirs will be used to create libparmes
+set(${PARMES_LIBRARY_NAME}_SRCDIRS
+  .
+  )
+
+if(WITH_FFTW)
+  set(${PARMES_LIBRARY_NAME}_SRCDIRS
+    ${${PARMES_LIBRARY_NAME}_SRCDIRS} fftw
+    )
+endif()
+
+#set(SCALES_DIR scalesReduced)
+set(SCALES_DIR scalesInterface)
+
+# --- scales ---
+if(WITH_SCALES)
+  set(${PARMES_LIBRARY_NAME}_SRCDIRS
+    ${${PARMES_LIBRARY_NAME}_SRCDIRS}
+    ${SCALES_DIR}/
+    ${SCALES_DIR}/layout
+    ${SCALES_DIR}/particles
+    ${SCALES_DIR}/particles/advec_line
+    #    ${SCALES_DIR}/output
+    )
+endif()
+
+# A main file to create an executable (test purpose)
+# Any files in these dirs will be used to create Parmes exec (linked with libparmes)
+set(${EXE_NAME}_SRCDIRS main)
+# Matching expr for files to be compiled.
+set(EXTS *.f90 *.f95 *F90)
+# Matching expr for headers (install purpose)
+set(EXTS_HDRS *.hpp *.h)
+# Note FP : we can also use  cmake vars ${CMAKE_Fortran_SOURCE_FILE_EXTENSIONS} ${CMAKE_C_SOURCE_FILE_EXTENSIONS} ${CMAKE_CXX_SOURCE_FILE_EXTENSIONS}
+
+# ============= The project =============
+# Set project name and project languages
+# => this automatically defines:
+#   - ${PARMES_LIBRARY_NAME}_BINARY_DIR : where you have run cmake, i.e. the place for compilation
+#   - ${PARMES_LIBRARY_NAME}_SOURCE_DIR : where sources (.f and .h and this CMakeLists.txt) are located
+# Note that because of OutOfSourceBuild, binary_dir and source_dir must be different.
+project(${PARMES_LIBRARY_NAME} Fortran)
+
+# ============= Search for libraries  =============
+# We search for libraries Parmes depends on and
+# set the compile/link conf (-I and -L opt)
+
+# --- Boost ---
+#set(Boost_ADDITIONAL_VERSIONS "1.49" "1.49.0")
+#set(Boost_NO_SYSTEM_PATHS ON)
+#find_package(Boost REQUIRED)
+#include_directories(${Boost_INCLUDE_DIRS})
+
+# ============= Prepare compilation =============
+
+# Force a default build type if not provided by user
+# CMAKE_BUILD_TYPE = empty, Debug, Release, RelWithDebInfo or MinSizeRel.
+if (NOT CMAKE_BUILD_TYPE)
+  set (CMAKE_BUILD_TYPE RELEASE CACHE STRING "Choose the type of build, options are: None, Debug, Release, RelWithDebInfo or MinSizeRel." FORCE)
+endif (NOT CMAKE_BUILD_TYPE)
+
+# If the project uses Fortran ...
+# Set module files directory (i.e. where .mod will be created)
+set(CMAKE_Fortran_MODULE_DIRECTORY ${CMAKE_BINARY_DIR}/Modules)
+#  Add compilation flags:
+#append_Fortran_FLAGS("-Wall -fPIC -ffree-line-length-none -DBLOCKING_SEND_PLUS -DBLOCKING_SEND")
+append_Fortran_FLAGS("-Wall -fPIC -ffree-line-length-none")
+
+if(USE_MPI)
+  # Find MPI for C++ and fortran.
+  find_package(MPI REQUIRED)
+  # -I
+  include_directories(${MPI_Fortran_INCLUDE_PATH})
+  # Add compilation flags
+  set(${PARMES_LIBRARY_NAME}_LINK_FLAGS ${${PARMES_LIBRARY_NAME}_LINK_FLAGS} ${MPI_Fortran_LINK_FLAGS})
+  # -I
+  # Add compilation flags
+  append_Fortran_flags(${MPI_Fortran_COMPILE_FLAGS})
+  #set(${PARMES_LIBRARY_NAME}_LINK_FLAGS ${${PARMES_LIBRARY_NAME}_LINK_FLAGS} ${MPI_Fortran_LINK_FLAGS})
+  set(LIBS ${LIBS} ${MPI_Fortran_LIBRARIES} )
+endif(USE_MPI)
+
+
+# --- PPM ---
+if(WITH_PPM)
+  add_subdirectory(ppmInterface)
+endif()
+
+# --- FFTW ---
+if(WITH_FFTW)
+  find_package(FFTW REQUIRED)
+  include_directories(${FFTW_INCLUDE_DIRS})
+  set(LIBS ${LIBS} ${FFTW_LIBRARIES})
+
+  # for python setup.py
+  set(FFTWLIB ${FFTW_LIBRARY} PARENT_SCOPE)
+endif()
+
+# ============= Generates ParmesConfig.hpp =============
+# The file PARMES_LIBRARY_NAME_defines.hpp will be generated from config.hpp.cmake;
+if(EXISTS ${CMAKE_SOURCE_DIR}/config.hpp.cmake)
+  configure_file(${CMAKE_SOURCE_DIR}/config.hpp.cmake ${PARMES_LIBRARY_NAME}_defines.hpp)
+  include_directories(${CMAKE_BINARY_DIR})
+endif()
+
+# ============= Source and header files list =============
+# We scan all files with matching extension in directories
+# containing sources.
+
+# Source and header files list:
+foreach(_DIR ${${PARMES_LIBRARY_NAME}_SRCDIRS})
+  set(_DIR_FILES)
+  foreach(_EXT ${EXTS}) # Source files
+    file(GLOB _DIR_FILES_EXT ${_DIR}/${_EXT})
+    if(_DIR_FILES_EXT)
+      list(APPEND ${PARMES_LIBRARY_NAME}_SRC ${_DIR_FILES_EXT})
+    endif()
+  endforeach()
+  foreach(_EXT ${EXTS_HDRS}) # Headers
+    file(GLOB _DIR_FILES_EXT ${_DIR}/${_EXT})
+    if(_DIR_FILES_EXT)
+      list(APPEND ${PARMES_LIBRARY_NAME}_HDRS ${_DIR_FILES_EXT})
+    endif()
+  endforeach()
+endforeach()
+# We add headers to source files
+list(APPEND ${PARMES_LIBRARY_NAME}_SRC ${${PARMES_LIBRARY_NAME}_HDRS})
+
+# The same for main dir ...
+foreach(_DIR ${${EXE_NAME}_SRCDIRS})
+  set(_DIR_FILES)
+  foreach(_EXT ${EXTS})
+    file(GLOB _DIR_FILES_EXT ${_DIR}/${_EXT})
+    if(_DIR_FILES_EXT)
+      list(APPEND ${EXE_NAME}_SRC ${_DIR_FILES_EXT})
+    endif()
+  endforeach()
+  foreach(_EXT ${EXTS_HDRS})
+    file(GLOB _DIR_FILES_EXT ${_DIR}/${_EXT})
+    if(_DIR_FILES_EXT)
+      list(APPEND ${EXE_NAME}_HDRS ${_DIR_FILES_EXT})
+    endif()
+  endforeach()
+endforeach()
+list(APPEND ${EXE_NAME}_SRC ${${EXE_NAME}_HDRS})
+
+# Add directories to those searched by compiler ...
+# -I
+include_directories(${${PARMES_LIBRARY_NAME}_SRCDIRS})
+include_directories(${${EXE_NAME}_HDRS})
+include_directories(${CMAKE_Fortran_MODULE_DIRECTORY})
+
+# Cmake tools to handle fortran-c interface. It will generate F2CMangle.hpp, a file
+# that will contain some useful macros to deal with fortran-C name mangling.
+# See cmake doc for this module.
+#include(FortranCInterface)
+#FortranCInterface_HEADER(${CMAKE_Fortran_MODULE_DIRECTORY}/F2CMangle.hpp
+#  MACRO_NAMESPACE "F2C_"
+#  SYMBOL_NAMESPACE "F2C_")
+
+# ============= Creates the library =============
+if(BUILD_SHARED_LIBS) # shared library
+  add_library(${PARMES_LIBRARY_NAME} SHARED ${${PARMES_LIBRARY_NAME}_SRC})
+else() # static library
+  add_library(${PARMES_LIBRARY_NAME} STATIC ${${PARMES_LIBRARY_NAME}_SRC})
+endif()
+# Libs to link with ${PARMES_LIBRARY_NAME}
+target_link_libraries(${PARMES_LIBRARY_NAME} ${LIBS})
+
+### set(PARMES_LINKED_LIBRARIES ${LIBS} ${PARMES_LIBRARY_NAME} PARENT_SCOPE)
+# ============= Creates the executable =============
+if(WITH_FFTW)
+  add_executable(${EXE_NAME} ${${EXE_NAME}_SRC})
+  add_dependencies(${EXE_NAME} ${PARMES_LIBRARY_NAME})
+  
+  # libs to link with EXE_NAME
+  target_link_libraries(${EXE_NAME} ${PARMES_LIBRARY_NAME})
+  target_link_libraries(${EXE_NAME} ${LIBS})
+endif()
+
+# ============== Add tests ==============
+if(WITH_TESTS)
+  message(STATUS "Enable testing ...")
+#  begin_test(src/tests/F2003)
+#  new_test(testAllocatedPtr userMod.f90 wrapper.f90 testAllocatedPtr.cxx)
+#  new_test(testNullPtr userMod.f90 wrapper.f90 testNullPtr.cxx)
+#  end_test()
+#  begin_test(src/tests/Particles)
+#  new_test(testCreate3D testCreate3D.f90)
+#  new_test(testCreate2D testCreate2D.f90)
+#  end_test()
+endif(WITH_TESTS)
+
+# ============= Prepare install =============
+
+# The library
+# The library, the headers and mod files, the cmake generated files
+# will be install in CMAKE_INSTALL_PREFIX/lib include and share
+include(InstallPackage)
+if(${PARMES_LIBRARY_NAME}_HDRS)
+  install_package(${PACKAGE_NAME} ${PARMES_LIBRARY_NAME} ${${PARMES_LIBRARY_NAME}_HDRS})
+else()
+  install_package(${PACKAGE_NAME} ${PARMES_LIBRARY_NAME})
+endif()
+#install(TARGETS ${EXE_NAME}
+#RUNTIME DESTINATION bin  # executables
+#  ) 
+
+
+#configure_file(${PACKAGE_NAME}Config.cmake.in
+#  "${PARMES_LIBRARY_BINARY_DIR}/${PACKAGE_NAME}Config.cmake")
+#configure_file(${PACKAGE_NAME}ConfigVersion.cmake.in
+#  "${PARMES_LIBRARY_BINARY_DIR}/${PACKAGE_NAME}ConfigVersion.cmake" @ONLY)
+#install(FILES
+#  "${PARMES_LIBRARY_BINARY_DIR}/InstallFiles/${_PACK}Config.cmake"
+#  "${PARMES_LIBRARY_BINARY_DIR}/InstallFiles/${_PACK}ConfigVersion.cmake"
+#  DESTINATION "${${PACKAGE_NAME}_CMAKE_DIR}" COMPONENT dev)
+
+
diff --git a/HySoP/src/Domain.f90 b/HySoP/src/Domain.f90
new file mode 100755
index 0000000000000000000000000000000000000000..d20f0998140a4301baf735b0c0bfef3d93fecbfd
--- /dev/null
+++ b/HySoP/src/Domain.f90
@@ -0,0 +1,83 @@
+!> Physical domain definition and its discretisation.
+!! Note FP : fortran structures might be better but I avoid them on purpose, for better f2py compatibility.
+module Domain
+
+  use client_data
+  
+  implicit none
+
+  private
+  public init_geometry, init_grid
+  public physDomainLowerPoint,physDomainUpperPoint,grid_resolution,grid_step,domain_ghostsize,domainLength,domain_bc,gridCells
+
+  !> Physical domain upper limit
+  real(mk), dimension(:), pointer :: physDomainUpperPoint=>NULL()
+  !> Physical domain lower limit
+  real(mk), dimension(:), pointer :: physDomainLowerPoint =>NULL()
+  !> Sizes of the domain
+  real(mk), dimension(:), pointer :: domainLength =>NULL()
+  !> Boundary conditions for the domain
+  integer,  dimension(:), pointer :: domain_bc=>NULL()
+  !> Number of ghost points in each direction
+  integer,  dimension(:), pointer :: domain_ghostsize=>NULL()
+  !> Grid resolution (number of points)
+  integer,  dimension(:), pointer :: grid_resolution =>NULL()
+  !> Number of cells (in each dir) on the grid
+  integer,  dimension(dim3) :: gridCells
+  !> Grid space step sizes
+  real(mk),  dimension(:), pointer :: grid_step =>NULL()
+  
+
+  integer, private :: istat
+
+contains
+  
+  !> Set continuous domain geometry
+  !> \param boundary conditions type (ppm way)
+  subroutine init_geometry(bc)
+    
+    !> Boundary conditions type (for ppm)
+    integer, intent(in) :: bc
+    
+    ! Domain size and min/max coords
+    allocate(physDomainLowerPoint(dim3), physDomainUpperPoint(dim3), domainLength(dim3),stat = istat)
+    if(istat.ne.0) stop 'Geometry, allocation failed'
+    physDomainUpperPoint = 0.0
+    physDomainLowerPoint = 0.0
+    physDomainUpperPoint(c_X) = 6.0!2.*pi!!1.0
+    physDomainUpperPoint(c_Y) = 4.0!2.*pi!1.0
+    !physDomainLowerPoint(c_X) = 0.0
+    !physDomainLowerPoint(c_Y) = 0.0
+    ! Boundary conditions and ghosts
+    allocate(domain_bc(2*dime), domain_ghostsize(dime), stat = istat)
+    if(istat.ne.0) stop 'BC, allocation failed'
+    domain_bc = bc
+    domain_ghostsize = 0 ! Warning : It seems that ghost>0 is required for ppm remesh 
+    domainLength = physDomainUpperPoint - physDomainLowerPoint
+
+  end subroutine init_geometry
+
+  !> Set discretisation parameters for the continuous domain.
+  !! Resolution corresponds to the number of points in one direction.
+  !! For periodic boundaries, point with index 1 is the same as point with last index
+  !! but must be present (ppm required)
+  subroutine init_grid()
+    
+    allocate(grid_resolution(dim3),grid_step(dim3),stat=istat)
+    if(istat.ne.0) stop 'grid_resolution, allocation failed'
+    grid_resolution = 1
+    grid_resolution(c_X) = 129!2001
+    grid_resolution(c_Y) = 65!1601
+!!$    grid_step(c_X) = (physDomainUpperPoint(c_X) - physDomainLowerPoint(c_X))/(real(grid_resolution(c_X),mk)-1.)
+!!$    grid_step(c_Y) = (physDomainUpperPoint(c_Y) - physDomainLowerPoint(c_Y))/(real(grid_resolution(c_Y),mk)-1.)
+!!$    grid_step(c_Z) = 1.
+!!$    grid_resolution(c_X) = 2001
+!!    grid_resolution = 256
+!!$    grid_resolution(c_Z) = 1
+    gridCells = max(grid_resolution-1,1)
+!!$    gridCells(c_Z) = 1
+    grid_step = (physDomainUpperPoint - physDomainLowerPoint)/gridCells
+    
+  end subroutine init_grid
+
+end module Domain
diff --git a/HySoP/src/Penalize.f90 b/HySoP/src/Penalize.f90
new file mode 100755
index 0000000000000000000000000000000000000000..6aaf75912bc2429bcb60fbc50d54f5bdffbbd1b5
--- /dev/null
+++ b/HySoP/src/Penalize.f90
@@ -0,0 +1,76 @@
+!> Penalization stuff (init chi, penalize vorticity)
+module penalisation
+
+  use client_data
+  use mpi
+  implicit none
+
+  private
+  
+  public :: penalise_velocity
+
+  ! Penalisation parameter 
+  real(mk),parameter,private :: lambda = 6e10
+  
+  ! Drag 
+  ! real(mk),dimension(dime) :: localDiagnostics
+
+contains
+  
+  !> Apply penalization functions to velocity (on boundaries to enforce Dirichlet cond. and on the sphere)
+  !! We also compute drag and lift (current mpi process local values) during this call
+  subroutine penalise_velocity(vel,dt,chi_sphere,chi_boundary)
+    ! Velocity, intent(inout)
+    real(mk), dimension(:,:,:,:,:), pointer :: vel
+    ! Indicators of the sets where velocity must be penalized
+    integer,dimension(:,:),pointer::chi_sphere
+    ! Indicators of the sets where velocity must be penalized
+    integer,dimension(:,:),pointer::chi_boundary
+    ! Time step
+    real(mk), intent(in) :: dt
+    ! Local mesh, number of points in each dir (1st index) for each sub (2nd index)
+    !integer, dimension(dime), intent(in)  :: resolution
+    integer :: k
+    real(mk) :: coef
+    !localDiagnostics=0.0
+
+    coef=1./(1.+dt*lambda)
+    
+    !do k=1,size(chi_boundary,2)
+    forall(k=1:size(chi_boundary,2))
+       vel(:,chi_boundary(1,k),chi_boundary(2,k),chi_boundary(3,k),:)=&
+            coef*vel(:,chi_boundary(1,k),chi_boundary(2,k),chi_boundary(3,k),:)
+    end forall!do
+    !forall(k=1:size(chi_sphere,2))
+       do k=1,size(chi_sphere,2)
+       vel(:,chi_sphere(1,k),chi_sphere(2,k),chi_sphere(3,k),:)=&
+            coef*vel(:,chi_sphere(1,k),chi_sphere(2,k),chi_sphere(3,k),:)
+       ! if( all(chi_sphere(:,k) < resolution(:))) then 
+       !   localDiagnostics=localDiagnostics + vel(:,chi_sphere(1,k),chi_sphere(2,k),chi_sphere(3,k),1)
+       ! end if
+    end do
+    !end forall
+
+  end subroutine penalise_velocity
+
+  !> Collect and reduce drag/lift values over all mpi proc
+!!$  subroutine get_dragAndLift(diagnostics,dvol,radius)
+!!$    !> diagnostics
+!!$    real(mk),dimension(:) :: diagnostics
+!!$    !> element of volume
+!!$    real(mk),intent(in)::dvol
+!!$    !> sphere radius
+!!$    real(mk),intent(in)::radius
+!!$    
+!!$    integer :: info
+!!$    real(mk) :: coef,uinf
+!!$
+!!$    uinf=1.0
+!!$    coef=2./(uinf**2*2.*radius)*dvol*lambda
+!!$    !localDiagnostics=coef*localDiagnostics
+!!$    ! Reduce drag and lift values over all procs
+!!$    !call MPI_Reduce(localDiagnostics,diagnostics,dime,MPI_DOUBLE_PRECISION,MPI_SUM,0,MPI_COMM_WORLD,info)
+!!$    
+!!$  end subroutine get_dragAndLift
+
+end module Penalisation
diff --git a/HySoP/src/Unstable/LEGI/changelog b/HySoP/src/Unstable/LEGI/changelog
new file mode 100644
index 0000000000000000000000000000000000000000..d6e0a83471ac41f3b633942ef3ac2b40087dbb8f
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/changelog
@@ -0,0 +1,16 @@
+2012-XX-XX X.X
+[new]		Adding 2 new schemes to the particles methods: lambda 4 corrected
+			(fourth space order, corrected for large time steps) and M'6
+[optim]		Faster particles method.
+
+2012-02-23 1.0
+[new]		Parallel fft.
+[new]		Parallel pseudo spectral solver for NS equation and convection-diffusion problem.
+[new]		Advection solver based on particles method.
+[new]		Avs i/o in a parallel context, only for the spectral solver.
+[new]		Distributed vtk xml output in the advection solver context.
+[new]		Both spectral and particle solvers use the same mpi-topology.
+[new]		Solve scalar equation with the particles method mixed with the spectral solver.
+[new]       Use different resolutions for velocity and scalars (for all scalar solvers)
+[new]		Post-processing: obtain spectrum
+[optim]		Optimisation of advection solver based on particles methods.
diff --git a/HySoP/src/Unstable/LEGI/doc/benchmark/Benchmark/description.tex b/HySoP/src/Unstable/LEGI/doc/benchmark/Benchmark/description.tex
new file mode 100644
index 0000000000000000000000000000000000000000..cd5fb817a67976632409c2f9e290c54826d956fc
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/doc/benchmark/Benchmark/description.tex
@@ -0,0 +1,49 @@
+%-----------------------------------------------------------------------------------------------------------------
+%--------------------------------------------------- Algo basique ----------------------------------------------------
+%-----------------------------------------------------------------------------------------------------------------
+
+\section{Mathematical description of the benchmarks}
+
+	Different benchmarks are provided. The purpose is to evaluate the advection solver and its implementation in both simple and complex cases. For the reader's convenience, let us sum up these benchmarks:
+\begin{enumerate}
+	\item 2D turning sphere : a simple case where the analytic solution is known.
+	\item Radial constant field with a velocity involving shear: this test-case allows us to test how efficient the solver is in complex cases with large time steps. The analytic solution is known.
+\end{enumerate}
+
+
+\subsection{(2D-)Turning sphere}
+
+	Let us denote by $\Omega = [0,1]^2$ the numerical domain.
+
+	The velocity field is defined by:
+\begin{equation}
+	\vect{v}\bigl( \begin{smallmatrix} x \\y \end{smallmatrix} \bigr) = \frac{2\pi}{T} \Biggl( \begin{matrix} 0.5 -y \\ x - 0.5 \end{matrix} \Biggr)
+\end{equation}
+with the period $T=1$ and $r_0 = \min(dx, dy)$, where $dx$, $dy$ denote the space steps.
+	The scalar is initialized as follows:
+\begin{equation}
+	u \bigl( \begin{smallmatrix} x \\y \end{smallmatrix} \bigr) = \begin{cases}
+			0 & \text{if $r^2 = \bigl(x - 3/5 \bigr)^2 + \bigl(y - 3/5 \bigr)^2 > r_0^2$}
+			\\ \bigl( 1 - r^2/r_0^2 \bigr)^4 & \text{else, with $r^2 = \bigl(x - 3/5 \bigr)^2 + \bigl(y - 3/5 \bigr)^2$}
+		\end{cases}
+\end{equation}
+
+	The analytic solution at time $t$ is a rotation of the initial scalar field around the Z-axis by an angle $\frac{2\pi}{T} \times t$.
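+	Explicitly, denoting by $R_{\theta}$ the rotation of angle $\theta$ around the domain center $c = (1/2, 1/2)$ and by $u_0$ the initial scalar field above, the exact solution reads
+\begin{equation}
+	u(\vect{x}, t) = u_0 \Bigl( R_{-2\pi t / T} \, (\vect{x} - c) + c \Bigr).
+\end{equation}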
+
+\subsection{Radial constant field with a velocity involving shear}
+
+	Let us denote by $\Omega = [-1,1]^2$ the numerical domain.
+
+	The velocity field is defined by:
+\begin{equation}
+	\vect{v}\bigl( \begin{smallmatrix} x \\y \end{smallmatrix} \bigr) = \cos\Bigl( \frac{3\pi}{2} \Bigr) \bigl( \begin{smallmatrix} -y \\ x \end{smallmatrix} \bigr)
+\end{equation}
+	The scalar is initialized as follows:
+\begin{equation}
+	u \bigl( \begin{smallmatrix} x \\y \end{smallmatrix} \bigr) = \begin{cases}
+			0 & \text{if $r^2 = x^2 + y^2 > 1$}
+			\\ \bigl( 1 - r^2 \bigr)^6 & \text{else, with $r^2 = x^2 + y^2$}
+		\end{cases}
+\end{equation}
+
+	As the radial component of the velocity vanishes, the initial scalar value matches a stationary solution. Therefore, the scalar field remains constant. As there is some tangential shear, this test shows how the implementation deals with large time steps. For instance, if it is based on the $\Lambda_{\tilde{2}}$ or $\Lambda_{\tilde{4}}$ remeshing formula, the corrected cases will appear (\ie some particles will be tagged). Note that for a CFL number larger than one, the classical $\lambda_6$ formulas are only of order 1.
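+	For reference, the CFL number mentioned here is $\textrm{CFL} = \| \vect{v} \|_{\infty} \, \Delta t / \Delta x$, so that a CFL number larger than one means that particles may cross more than one grid cell per time step.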
\ No newline at end of file
diff --git a/HySoP/src/Unstable/LEGI/doc/benchmark/bench.pdf b/HySoP/src/Unstable/LEGI/doc/benchmark/bench.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..7b051d7e5f9586d2d1b4061fec2fedfe44837df4
Binary files /dev/null and b/HySoP/src/Unstable/LEGI/doc/benchmark/bench.pdf differ
diff --git a/HySoP/src/Unstable/LEGI/doc/benchmark/bench.tex b/HySoP/src/Unstable/LEGI/doc/benchmark/bench.tex
new file mode 100644
index 0000000000000000000000000000000000000000..94b8a7b61ac3648667202f37dfe0b364ada84b99
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/doc/benchmark/bench.tex
@@ -0,0 +1,131 @@
+
+%-----------------------------------------------------------------------------------------------------------------
+%--------------------------------------------------- Préambule ----------------------------------------------------
+%-----------------------------------------------------------------------------------------------------------------
+
+\documentclass[a4paper, 12pt, twoside, openright]{article}
+
+
+%--------------------------------------------------- Packages -----------------------------------------------------
+	% Basiques
+\usepackage[T1]{fontenc}
+\usepackage[english, french]{babel}			% parce que le texte est en français
+\usepackage[utf8]{inputenc}				% pour avoir les accents (car encodage en utf8)
+
+	% Mathématiques
+\usepackage{amssymb}
+\usepackage{amsmath}
+\usepackage{amsfonts}
+\usepackage{amsthm}
+\usepackage{amsxtra}
+
+	% Figures
+\usepackage{graphicx} 					% pour insérer et mettre en page des figures
+\usepackage{algorithm}					% pour mettre en page des algorithmes
+\usepackage{algorithmic}
+\usepackage{subfigure}					% pour mettre des figures côte à côtes.
+\usepackage{tikz}						% pour insérer des figures via Tkiz
+%\usepackage{gnuplot-lua-tikz} 				% version modifier de Tikz pour utiliser les sortie gnuplot-lua
+\usetikzlibrary{%
+  calc,%
+  through,%
+  intersections,%
+  arrows,%
+  shapes.misc,% wg. rounded rectangle
+  shapes.geometric,%
+  chains,%
+  positioning,% wg. " of "
+  scopes,%
+  backgrounds,%
+  fit,%
+  mindmap,%
+  plotmarks
+  }
+%\usepackage[margin=10pt,font=small, labelfont=bf, labelsep=endash]{caption}	
+								  	% Pour avoir des légendes d'images "plus mieux"
+
+	% Tableau
+\usepackage{multirow}					% Pour fusionner des lignes
+
+
+	% Outils et rédaction
+\usepackage{url}					% pour pouvoir mettre des url.
+\urlstyle{sf}
+
+
+% Pour la rédaction :
+\usepackage{pdfsync}					% lorsque l'on clique sur le pdf, on arrive à la bonne ligne de code du .tex
+\usepackage[disable, textsize=tiny, french, textwidth=2.cm, color=orange!60!, linecolor=black]{todonotes}
+
+\usepgfmodule{plot}
+
+
+
+%----------------------------------------------------- Paramètres ------------------------------------------------------
+
+% ///// Auteur, titre /////
+
+\author{Jean-Baptiste Lagaert} 
+\title{Benchmark for advection solver based on particles method}
+
+
+% ///// Nouvelles commandes et paramètres /////
+
+% Paramètres
+\graphicspath{{../Figures/}}			% Répertoire contenant les illustrations
+
+% Numérotations
+\numberwithin{equation}{section}		% Numérotation des équations par section
+\setcounter{tocdepth}{1} 				% Profondeur maximale des titres dans la table des matières
+
+
+% Nouvelles commandes
+	% Pour les dérivées partielles :
+\newcommand{\dt}{\partial_t}
+\newcommand{\dx}{\partial_x}
+\newcommand{\dy}{\partial_y}
+\newcommand{\dz}{\partial_z}
+\newcommand{\dn}{\partial_n}
+	% Lemma and remarks :
+\theoremstyle{plain}% default
+\newtheorem{thm}{Théorème}[section]
+\newtheorem{lem}[thm]{Lemme}
+\newtheorem{lemEng}[thm]{Lemma}
+\newtheorem{prop}[thm]{Proposition}
+\theoremstyle{remark}
+\newtheorem*{rem}{Remarque}
+\newtheorem*{rem_en}{Remark}
+	% Quelques raccourcis :
+\newcommand{\R}{\mathbb{R}}
+\newcommand{\ie}{\emph{ie }}
+\newcommand{\vect}[1]{\mathbf{#1}}
+\newcommand{\el}{\emph{eLYSe }}
+	%Other
+\newcommand{\Lag}{\mathcal{L}}
+\newcommand{\J}{\mathcal{J}}
+%\floatname{algorithm}{Algorithme} % Francisation de l'environnement algorithm
+
+%\input{Divers/page_garde}
+
+%-----------------------------------------------------------------------------------------------------------------
+%-------------------------------------------------- Document -----------------------------------------------------
+%----------------------------------------------------------------------------------------------------------------
+
+\begin{document}
+
+\maketitle
+
+
+\begin{otherlanguage}{english}
+%\begin{hyphenrules}{english}
+
+Some benchmarks are provided in order to measure the efficiency of the implementation, the drawbacks and benefits of our numerical method, and its precision. These tests are designed to present both ``easy'' cases and complex cases that are hard to simulate.
+\vspace{1cm}
+
+\input{Benchmark/description}
+%\input{Benchmark/setup}
+%\input{Benchmark/resultats}
+%\end{hyphenrules}
+\end{otherlanguage}
+
+\end{document}
diff --git a/HySoP/src/Unstable/LEGI/doc/doxygen/Doxyfile b/HySoP/src/Unstable/LEGI/doc/doxygen/Doxyfile
new file mode 100644
index 0000000000000000000000000000000000000000..15804ff83fd2631c25ac6619cd93a9163feaa3b2
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/doc/doxygen/Doxyfile
@@ -0,0 +1,1525 @@
+# Doxyfile 1.5.9
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+#       TAG = value [value, ...]
+# For lists items can also be appended using:
+#       TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file 
+# that follow. The default is UTF-8 which is also the encoding used for all 
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the 
+# iconv built into libc) for the transcoding. See 
+# http://www.gnu.org/software/libiconv for the list of possible encodings.
+
+DOXYFILE_ENCODING      = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded 
+# by quotes) that should identify the project.
+
+PROJECT_NAME           = codescalar
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. 
+# This could be handy for archiving the generated documentation or 
+# if some version control system is used.
+
+PROJECT_NUMBER         =  1
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) 
+# base path where the generated documentation will be put. 
+# If a relative path is entered, it will be relative to the location 
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY       = ./codescalar_doc
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 
+# 4096 sub-directories (in 2 levels) under the output directory of each output 
+# format and will distribute the generated files over these directories. 
+# Enabling this option can be useful when feeding doxygen a huge amount of 
+# source files, where putting all generated files in the same directory would 
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS         = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all 
+# documentation generated by doxygen is written. Doxygen will use this 
+# information to generate all constant output in the proper language. 
+# The default language is English, other supported languages are: 
+# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, 
+# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, 
+# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English 
+# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, 
+# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak, 
+# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
+
+OUTPUT_LANGUAGE        = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will 
+# include brief member descriptions after the members that are listed in 
+# the file and class documentation (similar to JavaDoc). 
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC      = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend 
+# the brief description of a member or function before the detailed description. 
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the 
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF           = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator 
+# that is used to form the text in various listings. Each string 
+# in this list, if found as the leading text of the brief description, will be 
+# stripped from the text and the result after processing the whole list, is 
+# used as the annotated text. Otherwise, the brief description is used as-is. 
+# If left blank, the following values are used ("$name" is automatically 
+# replaced with the name of the entity): "The $name class" "The $name widget" 
+# "The $name file" "is" "provides" "specifies" "contains" 
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF       = 
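+# Illustrative only, not part of the original configuration: for a Fortran
+# project one might strip a common lead-in such as (values are placeholders):
+# ABBREVIATE_BRIEF = "The $name module" "is" "provides"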
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then 
+# Doxygen will generate a detailed section even if there is only a brief 
+# description.
+
+ALWAYS_DETAILED_SEC    = YES
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all 
+# inherited members of a class in the documentation of that class as if those 
+# members were ordinary class members. Constructors, destructors and assignment 
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB  = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full 
+# path before each file name in the file list and in the header files. If set 
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES        = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag 
+# can be used to strip a user-defined part of the path. Stripping is 
+# only done if one of the specified strings matches the left-hand part of 
+# the path. The tag can be used to show relative paths in the file list. 
+# If left blank the directory from which doxygen is run is used as the 
+# path to strip.
+
+STRIP_FROM_PATH        = 
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of 
+# the path mentioned in the documentation of a class, which tells 
+# the reader which header file to include in order to use a class. 
+# If left blank only the name of the header file containing the class 
+# definition is used. Otherwise one should specify the include paths that 
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH    = 
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter 
+# (but less readable) file names. This can be useful if your file system 
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES            = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen 
+# will interpret the first line (until the first dot) of a JavaDoc-style 
+# comment as the brief description. If set to NO, the JavaDoc 
+# comments will behave just like regular Qt-style comments 
+# (thus requiring an explicit @brief command for a brief description.)
+
+JAVADOC_AUTOBRIEF      = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then Doxygen will 
+# interpret the first line (until the first dot) of a Qt-style 
+# comment as the brief description. If set to NO, the comments 
+# will behave just like regular Qt-style comments (thus requiring 
+# an explicit \brief command for a brief description.)
+
+QT_AUTOBRIEF           = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen 
+# treat a multi-line C++ special comment block (i.e. a block of //! or /// 
+# comments) as a brief description. This used to be the default behaviour. 
+# The new default is to treat a multi-line C++ comment block as a detailed 
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented 
+# member inherits the documentation from any documented member that it 
+# re-implements.
+
+INHERIT_DOCS           = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce 
+# a new page for each member. If set to NO, the documentation of a member will 
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES  = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. 
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE               = 8
+
+# This tag can be used to specify a number of aliases that act 
+# as commands in the documentation. An alias has the form "name=value". 
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to 
+# put the command \sideeffect (or @sideeffect) in the documentation, which 
+# will result in a user-defined paragraph with heading "Side Effects:". 
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES                = 
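+# Illustration, mirroring the example in the comment above (commented out,
+# so it does not change the configuration):
+# ALIASES = "sideeffect=\par Side Effects:\n"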
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C 
+# sources only. Doxygen will then generate output that is more tailored for C. 
+# For instance, some of the names that are used will be different. The list 
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C  = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java 
+# sources only. Doxygen will then generate output that is more tailored for 
+# Java. For instance, namespaces will be presented as packages, qualified 
+# scopes will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA   = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran 
+# sources only. Doxygen will then generate output that is more tailored for 
+# Fortran.
+
+OPTIMIZE_FOR_FORTRAN   = YES
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL 
+# sources. Doxygen will then generate output that is tailored for 
+# VHDL.
+
+OPTIMIZE_OUTPUT_VHDL   = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it parses. 
+# With this tag you can assign which parser to use for a given extension. 
+# Doxygen has a built-in mapping, but you can override or extend it using this tag. 
+# The format is ext=language, where ext is a file extension, and language is one of 
+# the parsers supported by doxygen: IDL, Java, Javascript, C#, C, C++, D, PHP, 
+# Objective-C, Python, Fortran, and VHDL. For instance, to make doxygen treat 
+# .inc files as Fortran files (default is PHP), and .f files as C (default is 
+# Fortran), use: inc=Fortran f=C. Note that for custom extensions you also need 
+# to set FILE_PATTERNS, otherwise the files are not read by doxygen.
+
+EXTENSION_MAPPING      = f90=Fortran F90=Fortran f=Fortran F=Fortran
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want 
+# to include (a tag file for) the STL sources as input, then you should 
+# set this tag to YES in order to let doxygen match function declarations and 
+# definitions whose arguments contain STL classes (e.g. func(std::string); vs. 
+# func(std::string) {}). This also makes the inheritance and collaboration 
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT    = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to 
+# enable parsing support.
+
+CPP_CLI_SUPPORT        = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. 
+# Doxygen will parse them like normal C++ but will assume all classes use public 
+# instead of private inheritance when no explicit protection keyword is present.
+
+SIP_SUPPORT            = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate getter 
+# and setter methods for a property. Setting this option to YES (the default) 
+# will make doxygen replace the get and set methods by a property in the 
+# documentation. This will only work if the methods are indeed getting or 
+# setting a simple type. If this is not the case, or you want to show the 
+# methods anyway, you should set this option to NO.
+
+IDL_PROPERTY_SUPPORT   = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC 
+# tag is set to YES, then doxygen will reuse the documentation of the first 
+# member in the group (if any) for the other members of the group. By default 
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC   = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of 
+# the same type (for instance a group of public functions) to be put as a 
+# subgroup of that type (e.g. under the Public Functions section). Set it to 
+# NO to prevent subgrouping. Alternatively, this can be done per class using 
+# the \nosubgrouping command.
+
+SUBGROUPING            = YES
+
+# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum 
+# is documented as struct, union, or enum with the name of the typedef. So 
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct 
+# with name TypeT. When disabled the typedef will appear as a member of a file, 
+# namespace, or class. And the struct will be named TypeS. This can typically 
+# be useful for C code in case the coding convention dictates that all compound 
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+
+TYPEDEF_HIDES_STRUCT   = NO
+
+# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to 
+# determine which symbols to keep in memory and which to flush to disk. 
+# When the cache is full, less often used symbols will be written to disk. 
+# For small to medium size projects (<1000 input files) the default value is 
+# probably good enough. For larger projects a too small cache size can cause 
+# doxygen to be busy swapping symbols to and from disk most of the time 
+# causing a significant performance penalty. 
+# If the system has enough physical memory increasing the cache will improve the 
+# performance by keeping more symbols in memory. Note that the value works on 
+# a logarithmic scale so increasing the size by one will roughly double the 
+# memory usage. The cache size is given by this formula: 
+# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, 
+# corresponding to a cache size of 2^16 = 65536 symbols
+
+SYMBOL_CACHE_SIZE      = 0
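+# Worked example (illustrative only): a hypothetical SYMBOL_CACHE_SIZE of 3
+# would give a cache of 2^(16+3) = 2^19 = 524288 symbols, i.e. roughly eight
+# times the default footprint of 2^16 = 65536 symbols.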
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in 
+# documentation are documented, even if no documentation was available. 
+# Private class members and static file members will be hidden unless 
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+#EXTRACT_ALL            = NO
+EXTRACT_ALL            = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class 
+# will be included in the documentation.
+
+#EXTRACT_PRIVATE        = NO
+EXTRACT_PRIVATE        = YES
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file 
+# will be included in the documentation.
+
+#EXTRACT_STATIC         = NO
+EXTRACT_STATIC         = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) 
+# defined locally in source files will be included in the documentation. 
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES  = YES
+
+# This flag is only useful for Objective-C code. When set to YES local 
+# methods, which are defined in the implementation section but not in 
+# the interface are included in the documentation. 
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS  = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be 
+# extracted and appear in the documentation as a namespace called 
+# 'anonymous_namespace{file}', where file will be replaced with the base 
+# name of the file that contains the anonymous namespace. By default 
+# anonymous namespaces are hidden.
+
+EXTRACT_ANON_NSPACES   = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all 
+# undocumented members of documented classes, files or namespaces. 
+# If set to NO (the default) these members will be included in the 
+# various overviews, but no documentation section is generated. 
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS     = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all 
+# undocumented classes that are normally visible in the class hierarchy. 
+# If set to NO (the default) these classes will be included in the various 
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES     = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all 
+# friend (class|struct|union) declarations. 
+# If set to NO (the default) these declarations will be included in the 
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS  = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any 
+# documentation blocks found inside the body of a function. 
+# If set to NO (the default) these blocks will be appended to the 
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS      = NO
+
+# The INTERNAL_DOCS tag determines if documentation 
+# that is typed after a \internal command is included. If the tag is set 
+# to NO (the default) then the documentation will be excluded. 
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS          = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate 
+# file names in lower-case letters. If set to YES upper-case letters are also 
+# allowed. This is useful if you have classes or files whose names only differ 
+# in case and if your file system supports case sensitive file names. Windows 
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES       = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen 
+# will show members with their full class and namespace scopes in the 
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES       = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen 
+# will put a list of the files that are included by a file in the documentation 
+# of that file.
+
+SHOW_INCLUDE_FILES     = YES
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] 
+# is inserted in the documentation for inline members.
+
+INLINE_INFO            = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen 
+# will sort the (detailed) documentation of file and class members 
+# alphabetically by member name. If set to NO the members will appear in 
+# declaration order.
+
+SORT_MEMBER_DOCS       = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the 
+# brief documentation of file, namespace and class members alphabetically 
+# by member name. If set to NO (the default) the members will appear in 
+# declaration order.
+
+SORT_BRIEF_DOCS        = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the 
+# hierarchy of group names into alphabetical order. If set to NO (the default) 
+# the group names will appear in their defined order.
+
+SORT_GROUP_NAMES       = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be 
+# sorted by fully-qualified names, including namespaces. If set to 
+# NO (the default), the class list will be sorted only by class name, 
+# not including the namespace part. 
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. 
+# Note: This option applies only to the class list, not to the 
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME     = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or 
+# disable (NO) the todo list. This list is created by putting \todo 
+# commands in the documentation.
+
+GENERATE_TODOLIST      = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or 
+# disable (NO) the test list. This list is created by putting \test 
+# commands in the documentation.
+
+GENERATE_TESTLIST      = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or 
+# disable (NO) the bug list. This list is created by putting \bug 
+# commands in the documentation.
+
+GENERATE_BUGLIST       = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or 
+# disable (NO) the deprecated list. This list is created by putting 
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional 
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS       = 
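+# Illustrative sketch ("devnotes" is a hypothetical section name): blocks
+# wrapped in \if devnotes ... \endif would only be built with:
+# ENABLED_SECTIONS = devnotes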
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines 
+# the initial value of a variable or define consists of for it to appear in 
+# the documentation. If the initializer consists of more lines than specified 
+# here it will be hidden. Use a value of 0 to hide initializers completely. 
+# The appearance of the initializer of individual variables and defines in the 
+# documentation can be controlled using \showinitializer or \hideinitializer 
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES  = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated 
+# at the bottom of the documentation of classes and structs. If set to YES the 
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES        = YES
+
+# If the sources in your project are distributed over multiple directories 
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy 
+# in the documentation. The default is NO.
+
+SHOW_DIRECTORIES       = NO
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. 
+# This will remove the Files entry from the Quick Index and from the 
+# Folder Tree View (if specified). The default is YES.
+
+SHOW_FILES             = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the 
+# Namespaces page. 
+# This will remove the Namespaces entry from the Quick Index 
+# and from the Folder Tree View (if specified). The default is YES.
+
+SHOW_NAMESPACES        = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that 
+# doxygen should invoke to get the current version for each file (typically from 
+# the version control system). Doxygen will invoke the program by executing (via 
+# popen()) the command <command> <input-file>, where <command> is the value of 
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file 
+# provided by doxygen. Whatever the program writes to standard output 
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER    = 
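+# Illustrative sketch, assuming the sources live in a git repository (doxygen
+# appends the input file name to this command, printing the hash of the last
+# commit that touched the file):
+# FILE_VERSION_FILTER = "git log -1 --format=%h --"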
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed by 
+# doxygen. The layout file controls the global structure of the generated output files 
+# in an output format independent way. To create the layout file that represents 
+# doxygen's defaults, run doxygen with the -l option. You can optionally specify a 
+# file name after the option, if omitted DoxygenLayout.xml will be used as the name 
+# of the layout file.
+
+LAYOUT_FILE            = 
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated 
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET                  = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are 
+# generated by doxygen. Possible values are YES and NO. If left blank 
+# NO is used.
+
+WARNINGS               = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings 
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will 
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED   = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for 
+# potential errors in the documentation, such as not documenting some 
+# parameters in a documented function, or documenting parameters that 
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR      = YES
+
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for 
+# functions that are documented, but have no documentation for their parameters 
+# or return value. If set to NO (the default) doxygen will only warn about 
+# wrong or incomplete parameter documentation, but not about the absence of 
+# documentation.
+
+WARN_NO_PARAMDOC       = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that 
+# doxygen can produce. The string should contain the $file, $line, and $text 
+# tags, which will be replaced by the file and line number from which the 
+# warning originated and the warning text. Optionally the format may contain 
+# $version, which will be replaced by the version of the file (if it could 
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT            = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning 
+# and error messages should be written. If left blank the output is written 
+# to stderr.
+
+WARN_LOGFILE           = 
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain 
+# documented source files. You may enter file names like "myfile.cpp" or 
+# directories like "/usr/src/myproject". Separate the files or directories 
+# with spaces.
+
+INPUT                  = "../../src" "../../test/src" "../../example/src/" "ext_doc"
+
+# This tag can be used to specify the character encoding of the source files 
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is 
+# also the default input encoding. Doxygen uses libiconv (or the iconv built 
+# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for 
+# the list of possible encodings.
+
+INPUT_ENCODING         = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the 
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp 
+# and *.h) to filter out the source-files in the directories. If left 
+# blank the following patterns are tested: 
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx 
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90
+
+FILE_PATTERNS          = *.f90 *.F90 *.F *.f
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories 
+# should be searched for input files as well. Possible values are YES and NO. 
+# If left blank NO is used.
+
+RECURSIVE              = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should 
+# be excluded from the INPUT source files. This way you can easily exclude a 
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE                = 
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or 
+# directories that are symbolic links (a Unix filesystem feature) are excluded 
+# from the input.
+
+EXCLUDE_SYMLINKS       = NO
+
+# If the value of the INPUT tag contains directories, you can use the 
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude 
+# certain files from those directories. Note that the wildcards are matched 
+# against the file with absolute path, so to exclude all test directories 
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS       = */~* 
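+# A hypothetical extension of the same idea, skipping an old/ subtree
+# (the directory name is a placeholder; += appends to the list):
+# EXCLUDE_PATTERNS += */old/*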
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names 
+# (namespaces, classes, functions, etc.) that should be excluded from the 
+# output. The symbol name can be a fully qualified name, a word, or if the 
+# wildcard * is used, a substring. Examples: ANamespace, AClass, 
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS        = 
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or 
+# directories that contain example code fragments that are included (see 
+# the \include command).
+
+EXAMPLE_PATH           = 
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the 
+# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp 
+# and *.h) to filter out the source-files in the directories. If left 
+# blank all files are included.
+
+EXAMPLE_PATTERNS       = 
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be 
+# searched for input files to be used with the \include or \dontinclude 
+# commands irrespective of the value of the RECURSIVE tag. 
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE      = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or 
+# directories that contain images that are included in the documentation (see 
+# the \image command).
+
+IMAGE_PATH             = ./images
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should 
+# invoke to filter for each input file. Doxygen will invoke the filter program 
+# by executing (via popen()) the command <filter> <input-file>, where <filter> 
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an 
+# input file. Doxygen will then use the output that the filter program writes 
+# to standard output. If FILTER_PATTERNS is specified, this tag will be 
+# ignored.
+
+INPUT_FILTER           = 
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern 
+# basis. Doxygen will compare the file name with each pattern and apply the 
+# filter if there is a match. The filters are a list of the form: 
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further 
+# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER 
+# is applied to all files.
+
+FILTER_PATTERNS        = 
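+# Illustrative sketch only; my_f90_filter is a placeholder for a
+# user-supplied script, not an existing tool:
+# FILTER_PATTERNS = *.f90=my_f90_filter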
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using 
+# INPUT_FILTER) will be used to filter the input files when producing source 
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES    = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will 
+# be generated. Documented entities will be cross-referenced with these sources. 
+# Note: To get rid of all source code in the generated output, make sure also 
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER         = YES
+
+# Setting the INLINE_SOURCES tag to YES will include the body 
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES         = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct 
+# doxygen to hide any special comment blocks from generated source code 
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS    = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES 
+# then for each documented function all documented 
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES 
+# then for each documented function all documented entities 
+# called/used by that function will be listed.
+
+REFERENCES_RELATION    = YES
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) 
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from 
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will 
+# link to the source code. 
+# Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code 
+# will point to the HTML generated by the htags(1) tool instead of doxygen 
+# built-in source browser. The htags tool is part of GNU's global source 
+# tagging system (see http://www.gnu.org/software/global/global.html). You 
+# will need version 4.8.6 or higher.
+
+USE_HTAGS              = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen 
+# will generate a verbatim copy of the header file for each class for 
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS       = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index 
+# of all compounds will be generated. Enable this if the project 
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX     = YES
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then 
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns 
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX    = 5
+
+# In case all classes in a project start with a common prefix, all 
+# classes will be put under the same header in the alphabetical index. 
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that 
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX          = 
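+# Hypothetical example for a Fortran code whose modules share a common
+# prefix (mod_ is a placeholder):
+# IGNORE_PREFIX = mod_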
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will 
+# generate HTML output.
+
+GENERATE_HTML          = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT            = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for 
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank 
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION    = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for 
+# each generated HTML page. If it is left blank doxygen will generate a 
+# standard header.
+
+HTML_HEADER            = 
+
+# If the HTML_FOOTER_DESCRIPTION tag is set to YES, Doxygen will 
+# add the generation date, project name and doxygen version to the HTML footer.
+
+HTML_FOOTER_DESCRIPTION= NO
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for 
+# each generated HTML page. If it is left blank doxygen will generate a 
+# standard footer.
+
+HTML_FOOTER            = 
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading 
+# style sheet that is used by each HTML page. It can be used to 
+# fine-tune the look of the HTML output. If the tag is left blank doxygen 
+# will generate a default style sheet. Note that doxygen will try to copy 
+# the style sheet file to the HTML output directory, so don't put your own 
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET        = 
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, 
+# files or namespaces will be aligned in HTML using tables. If set to 
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS     = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML 
+# documentation will contain sections that can be hidden and shown after the 
+# page has loaded. For this to work a browser that supports 
+# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox, 
+# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari).
+
+HTML_DYNAMIC_SECTIONS  = NO
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files 
+# will be generated that can be used as input for Apple's Xcode 3 
+# integrated development environment, introduced with OSX 10.5 (Leopard). 
+# To create a documentation set, doxygen will generate a Makefile in the 
+# HTML output directory. Running make will produce the docset in that 
+# directory and running "make install" will install the docset in 
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find 
+# it at startup. 
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information.
+
+GENERATE_DOCSET        = NO
+
+# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the 
+# feed. A documentation feed provides an umbrella under which multiple 
+# documentation sets from a single provider (such as a company or product suite) 
+# can be grouped.
+
+DOCSET_FEEDNAME        = "Doxygen generated docs"
+
+# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that 
+# should uniquely identify the documentation set bundle. This should be a 
+# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen 
+# will append .docset to the name.
+
+DOCSET_BUNDLE_ID       = org.doxygen.Project
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files 
+# will be generated that can be used as input for tools like the 
+# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) 
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP      = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can 
+# be used to specify the file name of the resulting .chm file. You 
+# can add a path in front of the file if the result should not be 
+# written to the html output directory.
+
+CHM_FILE               = 
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can 
+# be used to specify the location (absolute path including file name) of 
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run 
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION           = 
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag 
+# controls if a separate .chi index file is generated (YES) or that 
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI           = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING 
+# is used to encode HtmlHelp index (hhk), content (hhc) and project file 
+# content.
+
+CHM_INDEX_ENCODING     = 
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag 
+# controls whether a binary table of contents is generated (YES) or a 
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC             = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members 
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND             = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and QHP_VIRTUAL_FOLDER 
+# are set, an additional index file will be generated that can be used as input for 
+# Qt's qhelpgenerator to generate a Qt Compressed Help (.qch) of the generated 
+# HTML documentation.
+
+GENERATE_QHP           = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can 
+# be used to specify the file name of the resulting .qch file. 
+# The path specified is relative to the HTML output folder.
+
+QCH_FILE               = 
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating 
+# Qt Help Project output. For more information please see 
+# http://doc.trolltech.com/qthelpproject.html#namespace
+
+QHP_NAMESPACE          = 
+
+# The QHP_VIRTUAL_FOLDER tag specifies the virtual folder to use when generating 
+# Qt Help Project output. For more information please see 
+# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+
+QHP_VIRTUAL_FOLDER     = doc
+
+# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to add. 
+# For more information please see 
+# http://doc.trolltech.com/qthelpproject.html#custom-filters
+
+QHP_CUST_FILTER_NAME   = 
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the 
+# custom filter to add. For more information please see 
+# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">Qt Help Project / Custom Filters</a>.
+
+QHP_CUST_FILTER_ATTRS  = 
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this 
+# project's filter section matches. For more information please see 
+# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">Qt Help Project / Filter Attributes</a>.
+
+QHP_SECT_FILTER_ATTRS  = 
+
+# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can 
+# be used to specify the location of Qt's qhelpgenerator. 
+# If non-empty doxygen will try to run qhelpgenerator on the generated 
+# .qhp file.
+
+QHG_LOCATION           = 
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at 
+# top of each HTML page. The value NO (the default) enables the index and 
+# the value YES disables it.
+
+DISABLE_INDEX          = NO
+
+# This tag can be used to set the number of enum values (range [1..20]) 
+# that doxygen will group on one line in the generated HTML documentation.
+
+ENUM_VALUES_PER_LINE   = 4
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index 
+# structure should be generated to display hierarchical information. 
+# If the tag value is set to FRAME, a side panel will be generated 
+# containing a tree-like index structure (just like the one that 
+# is generated for HTML Help). For this to work a browser that supports 
+# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, 
+# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are 
+# probably better off using the HTML help feature. Other possible values 
+# for this tag are: HIERARCHIES, which will generate the Groups, Directories, 
+# and Class Hierarchy pages using a tree view instead of an ordered list; 
+# ALL, which combines the behavior of FRAME and HIERARCHIES; and NONE, which 
+# disables this behavior completely. For backwards compatibility with previous 
+# releases of Doxygen, the values YES and NO are equivalent to FRAME and NONE 
+# respectively.
+
+GENERATE_TREEVIEW      = YES
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be 
+# used to set the initial width (in pixels) of the frame in which the tree 
+# is shown.
+
+TREEVIEW_WIDTH         = 250
+
+# Use this tag to change the font size of Latex formulas included 
+# as images in the HTML documentation. The default is 10. Note that 
+# when you change the font size after a successful doxygen run you need 
+# to manually remove any form_*.png images from the HTML output directory 
+# to force them to be regenerated.
+
+FORMULA_FONTSIZE       = 10
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will 
+# generate Latex output.
+
+GENERATE_LATEX         = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT           = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be 
+# invoked. If left blank `latex' will be used as the default command name.
+
+LATEX_CMD_NAME         = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to 
+# generate index for LaTeX. If left blank `makeindex' will be used as the 
+# default command name.
+
+MAKEINDEX_CMD_NAME     = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact 
+# LaTeX documents. This may be useful for small projects and may help to 
+# save some trees in general.
+
+COMPACT_LATEX          = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used 
+# by the printer. Possible values are: a4, a4wide, letter, legal and 
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE             = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX 
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES         = 
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for 
+# the generated latex document. The header should contain everything until 
+# the first chapter. If it is left blank doxygen will generate a 
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER           = 
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated 
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will 
+# contain links (just like the HTML output) instead of page references. 
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS         = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of 
+# plain latex in the generated Makefile. Set this option to YES to get a 
+# higher quality PDF documentation.
+
+USE_PDFLATEX           = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode 
+# command to the generated LaTeX files. This will instruct LaTeX to keep 
+# running if errors occur, instead of asking the user for help. 
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE        = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not 
+# include the index chapters (such as File Index, Compound Index, etc.) 
+# in the output.
+
+LATEX_HIDE_INDICES     = NO
+
+# If LATEX_SOURCE_CODE is set to YES then doxygen will include source code 
+# with syntax highlighting in the LaTeX output. Note that which sources are 
+# shown also depends on other settings such as SOURCE_BROWSER.
+
+LATEX_SOURCE_CODE      = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output. 
+# The RTF output is optimized for Word 97 and may not look very pretty with 
+# other RTF readers or editors.
+
+GENERATE_RTF           = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT             = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact 
+# RTF documents. This may be useful for small projects and may help to 
+# save some trees in general.
+
+COMPACT_RTF            = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated 
+# will contain hyperlink fields. The RTF file will 
+# contain links (just like the HTML output) instead of page references. 
+# This makes the output suitable for online browsing using WORD or other 
+# programs which support those fields. 
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS         = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's 
+# config file, i.e. a series of assignments. You only have to provide 
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE    = 
+
+# Set optional variables used in the generation of an rtf document. 
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE    = 
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will 
+# generate man pages.
+
+GENERATE_MAN           = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT             = man
+
+# The MAN_EXTENSION tag determines the extension that is added to 
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION          = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output, 
+# then it will generate one additional man file for each entity 
+# documented in the real man page(s). These additional files 
+# only source the real man page, but without them the man command 
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS              = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will 
+# generate an XML file that captures the structure of 
+# the code including all documentation.
+
+GENERATE_XML           = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT             = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema, 
+# which can be used by a validating XML parser to check the 
+# syntax of the XML files.
+
+XML_SCHEMA             = 
+
+# The XML_DTD tag can be used to specify an XML DTD, 
+# which can be used by a validating XML parser to check the 
+# syntax of the XML files.
+
+XML_DTD                = 
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will 
+# dump the program listings (including syntax highlighting 
+# and cross-referencing information) to the XML output. Note that 
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING     = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will 
+# generate an AutoGen Definitions (see autogen.sf.net) file 
+# that captures the structure of the code including all 
+# documentation. Note that this feature is still experimental 
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF   = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will 
+# generate a Perl module file that captures the structure of 
+# the code including all documentation. Note that this 
+# feature is still experimental and incomplete at the 
+# moment.
+
+GENERATE_PERLMOD       = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate 
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able 
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX          = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be 
+# nicely formatted so it can be parsed by a human reader. This is useful 
+# if you want to understand what is going on. On the other hand, if this 
+# tag is set to NO the size of the Perl module output will be much smaller 
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY         = YES
+
+# The names of the make variables in the generated doxyrules.make file 
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. 
+# This is useful so different doxyrules.make files included by the same 
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX = 
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor   
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will 
+# evaluate all C-preprocessor directives found in the sources and include 
+# files.
+
+ENABLE_PREPROCESSING   = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro 
+# names in the source code. If set to NO (the default) only conditional 
+# compilation will be performed. Macro expansion can be done in a controlled 
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION        = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES 
+# then the macro expansion is limited to the macros specified with the 
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF     = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the include files 
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES        = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that 
+# contain include files that are not input files but should be processed by 
+# the preprocessor.
+
+INCLUDE_PATH           = 
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard 
+# patterns (like *.h and *.hpp) to filter out the header-files in the 
+# directories. If left blank, the patterns specified with FILE_PATTERNS will 
+# be used.
+
+INCLUDE_FILE_PATTERNS  = 
+
+# The PREDEFINED tag can be used to specify one or more macro names that 
+# are defined before the preprocessor is started (similar to the -D option of 
+# gcc). The argument of the tag is a list of macros of the form: name 
+# or name=definition (no spaces). If the definition and the = are 
+# omitted =1 is assumed. To prevent a macro definition from being 
+# undefined via #undef or recursively expanded use the := operator 
+# instead of the = operator.
+
+PREDEFINED             = 
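+# Illustration of the syntax described above (macro names are hypothetical;
+# := prevents the last one from being undefined or recursively expanded):
+# PREDEFINED = USE_MPI=1 NDEBUG DOXYGEN_SKIP:=1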
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then 
+# this tag can be used to specify a list of macro names that should be expanded. 
+# The macro definition that is found in the sources will be used. 
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED      = 
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then 
+# doxygen's preprocessor will remove all function-like macros that are alone 
+# on a line, have an all uppercase name, and do not end with a semicolon. Such 
+# function macros are typically used for boiler-plate code, and will confuse 
+# the parser if not removed.
+
+SKIP_FUNCTION_MACROS   = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references   
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles. 
+# Optionally an initial location of the external documentation 
+# can be added for each tagfile. The format of a tag file without 
+# this location is as follows: 
+#  
+# TAGFILES = file1 file2 ... 
+# Adding location for the tag files is done as follows: 
+#  
+# TAGFILES = file1=loc1 "file2 = loc2" ... 
+# where "loc1" and "loc2" can be relative or absolute paths or 
+# URLs. If a location is present for each tag, the installdox tool 
+# does not have to be run to correct the links. 
+# Note that each tag file must have a unique name 
+# (where the name does NOT include the path) 
+# If a tag file is not located in the directory in which doxygen 
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES               =
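+# Illustration of the file=location syntax from the comment above (paths are
+# placeholders):
+# TAGFILES = ../otherlib/otherlib.tag=../otherlib/doc/html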
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create 
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE       = 
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed 
+# in the class index. If set to NO only the inherited external classes 
+# will be listed.
+
+ALLEXTERNALS           = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed 
+# in the modules index. If set to NO, only the current project's groups will 
+# be listed.
+
+EXTERNAL_GROUPS        = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script 
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH              = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool   
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will 
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base 
+# or super classes. Setting the tag to NO turns the diagrams off. Note that 
+# this option is superseded by the HAVE_DOT option below. This is only a 
+# fallback. It is recommended to install and use dot, since it yields more 
+# powerful graphs.
+
+CLASS_DIAGRAMS         = YES
+
+# You can define message sequence charts within doxygen comments using the \msc 
+# command. Doxygen will then run the mscgen tool (see 
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the 
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where 
+# the mscgen tool resides. If left empty the tool is assumed to be found in the 
+# default search path.
+
+MSCGEN_PATH            = 
+
+# If set to YES, the inheritance and collaboration graphs will hide 
+# inheritance and usage relations if the target is undocumented 
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS   = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is 
+# available from the path. This tool is part of Graphviz, a graph visualization 
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section 
+# have no effect if this option is set to NO (the default)
+
+#HAVE_DOT               = NO
+HAVE_DOT               = YES
+
+# By default doxygen will write a font called FreeSans.ttf to the output 
+# directory and reference it in all dot files that doxygen generates. This 
+# font does not include all possible unicode characters however, so when you need 
+# these (or just want a differently looking font) you can specify the font name 
+# using DOT_FONTNAME. You need to make sure dot is able to find the font, 
+# which can be done by putting it in a standard location or by setting the 
+# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory 
+# containing the font.
+
+DOT_FONTNAME           = FreeSans
+
+# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. 
+# The default size is 10pt.
+
+DOT_FONTSIZE           = 10
+
+# By default doxygen will tell dot to use the output directory to look for the 
+# FreeSans.ttf font (which doxygen will put there itself). If you specify a 
+# different font using DOT_FONTNAME you can set the path where dot 
+# can find it using this tag.
+
+DOT_FONTPATH           = 
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen 
+# will generate a graph for each documented class showing the direct and 
+# indirect inheritance relations. Setting this tag to YES will force 
+# the CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH            = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen 
+# will generate a graph for each documented class showing the direct and 
+# indirect implementation dependencies (inheritance, containment, and 
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH    = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen 
+# will generate a graph for groups, showing the direct group dependencies.
+
+GROUP_GRAPHS           = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and 
+# collaboration diagrams in a style similar to the OMG's Unified Modeling 
+# Language.
+
+UML_LOOK               = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the 
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS     = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT 
+# tags are set to YES then doxygen will generate a graph for each documented 
+# file showing the direct and indirect include dependencies of the file with 
+# other documented files.
+
+INCLUDE_GRAPH          = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and 
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each 
+# documented header file showing the documented files that directly or 
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH      = YES
+
+# If the CALL_GRAPH and HAVE_DOT options are set to YES then 
+# doxygen will generate a call dependency graph for every global function 
+# or class method. Note that enabling this option will significantly increase 
+# the time of a run. So in most cases it will be better to enable call graphs 
+# for selected functions only using the \callgraph command.
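+# As a minimal illustration (the routine name below is hypothetical), such a
+# selective graph can be requested from a Fortran doxygen comment:
+#   !> Advect the scalar field.
+#   !! \callgraph
+#   subroutine advect_scalar(...)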
+
+#CALL_GRAPH             = NO
+CALL_GRAPH             = YES
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then 
+# doxygen will generate a caller dependency graph for every global function 
+# or class method. Note that enabling this option will significantly increase 
+# the time of a run. So in most cases it will be better to enable caller 
+# graphs for selected functions only using the \callergraph command.
+
+#CALLER_GRAPH           = NO
+CALLER_GRAPH           = YES
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen 
+# will show a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY    = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES 
+# then doxygen will show the dependencies a directory has on other directories 
+# in a graphical way. The dependency relations are determined by the #include 
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH        = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images 
+# generated by dot. Possible values are png, jpg, or gif. 
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT       = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be 
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH               = 
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that 
+# contain dot files that are included in the documentation (see the 
+# \dotfile command).
+
+DOTFILE_DIRS           = 
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of 
+# nodes that will be shown in the graph. If the number of nodes in a graph 
+# becomes larger than this value, doxygen will truncate the graph, which is 
+# visualized by representing a node as a red box. Note that if the 
+# number of direct children of the root node in a graph is already larger 
+# than DOT_GRAPH_MAX_NODES, the graph will not be shown at all. Also note 
+# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+
+DOT_GRAPH_MAX_NODES    = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the 
+# graphs generated by dot. A depth value of 3 means that only nodes reachable 
+# from the root by following a path via at most 3 edges will be shown. Nodes 
+# that lay further from the root node will be omitted. Note that setting this 
+# option to 1 or 2 may greatly reduce the computation time needed for large 
+# code bases. Also note that the size of a graph can be further restricted by 
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+
+MAX_DOT_GRAPH_DEPTH    = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent 
+# background. This is disabled by default, because dot on Windows does not 
+# seem to support this out of the box. Warning: Depending on the platform used, 
+# enabling this option may lead to badly anti-aliased labels on the edges of 
+# a graph (i.e. they become hard to read).
+
+DOT_TRANSPARENT        = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output 
+# files in one run (i.e. multiple -o and -T options on the command line). This 
+# makes dot run faster, but since only newer versions of dot (>1.8.10) 
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS      = NO
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will 
+# generate a legend page explaining the meaning of the various boxes and 
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND        = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will 
+# remove the intermediate dot files that are used to generate 
+# the various graphs.
+
+DOT_CLEANUP            = YES
+
+#---------------------------------------------------------------------------
+# Options related to the search engine
+#---------------------------------------------------------------------------
+
+# The SEARCHENGINE tag specifies whether or not a search engine should be 
+# used. If set to NO the values of all tags below this one will be ignored.
+
+SEARCHENGINE           = NO
diff --git a/HySoP/src/Unstable/LEGI/doc/doxygen/ext_doc/main_ext.f b/HySoP/src/Unstable/LEGI/doc/doxygen/ext_doc/main_ext.f
new file mode 100644
index 0000000000000000000000000000000000000000..84c0e75d1ee5f93f545be45021cb5ad08e4e6966
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/doc/doxygen/ext_doc/main_ext.f
@@ -0,0 +1,140 @@
+!> \mainpage CodeScalar Documentation
+!!
+!! \section intro_sec Introduction
+!!
+!! This is the Code_scalar documentation!
+!!
+!! \section install_sec Installation
+!!
+!! \subsection tools_subsec Tools required:
+!! - cmake
+!! - fftw
+!! - mpi
+!!
+!! \subsection compile Compilation
+!! - Obtain the code from the svn repositories.
+!! - Go to the code location (the folder must contain at least the "src" and "CMake" folders and a "CMakeLists" file)
+!! - Create a build folder and move into it: \verbatim $ mkdir build && cd build \endverbatim
+!! - Run cmake: \verbatim $ cmake .. \endverbatim
+!! - Compile : \verbatim $ make \endverbatim
+!!
+!! \subsection add_comp Advanced compilation options
+!! This section explains how to perform advanced tasks. All of them can
+!! only be performed after cmake has been run at least once.
+!!
+!! - Update the Fortran files generated from the .fortran files: <tt>$ make fortran_file</tt>
+!! - Generate (or update) the doxygen documentation: <tt>$ make doc</tt>
+!! - Obtain the list of possible targets for make: <tt>$ make help</tt>
+!! - <tt>$ make VERBOSE=1</tt> shows every command run by make.
+!!
+!! The CMakeLists file contains some flags which can be changed (see the
+!! example below):
+!! - <tt>WITH_TESTS</tt>: to compile the tests.
+!! - <tt>WITH_EXAMPLE</tt>: to compile the benchmarks.
+!! - <tt>GENERATE_SRC</tt>: to (re-)generate the source files from the *.fortran files.
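+!!
+!! For instance (the value to pass depends on your needs), a flag is set at
+!! configure time, from the build folder:
+!! \verbatim $ cmake -DWITH_TESTS=ON .. \endverbatim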
+!!
+!! \section running Running the program
+!! Run the <tt>ScaleExe</tt> executable.
+!!
+!! \section doc Additional documentation
+!! This manual is divided into the following sections:
+!! - \subpage part_doc : some information about the particle method implementation
+!! - \subpage output_doc : some information about postprocessing and input/output
+!!
+
+!-----------------------------------------------------------
+!
+!> \page  part_doc About the particle method implementation
+!!  This page describes the implementation of the advection solver based
+!!  on the particle method.
+!!  
+!! \section part_intro Introduction
+!!
+!! \subsection notation Some notations
+!! All the files use some common notations (a small worked example follows):
+!!  -   the local domain of each process is indexed from 1 to N_proc.
+!!  -   d_sc = (scalar) space step
+!!  -   ind = mesh (or particle) index. As we use Fortran, it goes
+!!      from 1 to N_proc.
+!!  -   pos = particle position. Consistently with ind, if there is one
+!!      particle at each mesh point, it goes from d_sc to (N_proc)*d_sc.
+!!      A particle "i" belongs to the local subdomain if and only if pos(i)
+!!      belongs to [d_sc;d_sc*(N_proc+1)[.
+!!  -   pos_adim = dimensionless particle position. Consistently with ind, if there
+!!      is one particle at each mesh point, it goes from 1 to N_proc.
+!!
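+!! As a small worked example (purely illustrative values), take d_sc = 0.1
+!! and N_proc = 4: with one particle per mesh point, pos(ind) = ind*d_sc,
+!! i.e. pos = 0.1, 0.2, 0.3, 0.4, and the local subdomain is [0.1;0.5[.
+!!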
+!! \subsection part_todo Todo
+!! \todo
+!!  -   work on blocks of lines rather than on single 1D lines
+!!      - small groups => less local memory usage, and more messages
+!!        which are each smaller.
+!!      - large groups => pooled communications (fewer but larger
+!!        messages) at the cost of more local memory. (To be avoided
+!!        if RAM is scarce.)
+!!  -   add new remeshing schemes:
+!!      -    corrected lambda 4
+!!      -    M'6
+!!  -   Within a group of lines, another level of parallelism (e.g.
+!!      OpenMP) could be used.
+!
+!-----------------------------------------------------------
+
+
+!-----------------------------------------------------------
+!
+!> \page  output_doc About input and output
+!!  This page describes the implementation of the IO.
+!!
+!! \section io_intro Introduction
+!! Two output formats are implemented: avs files and parallel vtk xml
+!! files.
+!!  The first one is provided by the "avs" module (see avs.F90) and the
+!! second one by the "parallel_io" module. Both can be used for output
+!! during a simulation on a parallel computer.
+!!
+!! \section io_diff Main differences between the two output formats
+!! -    avs files can also be used as input files ("vtk xml" input
+!!      will be implemented later)
+!! -    number of files created and parallel context:
+!!      -   the "vtk xml" standard allows the output to be split over several
+!!          files. The module provided in scale writes one file per
+!!          process. More precisely, the output needs neither communication
+!!          nor mpi_io. But it also means a lot of files will be created
+!!          if the number of processes is high (see the example below).
+!!      -   on the contrary, the avs module produces only one output file
+!!          containing all the data.
+!!      -   Note that on "small" clusters, writes to the hard disk are
+!!          sequential. In a future update, the number of output files for
+!!          the "vtk xml" format will be user-defined.
+!! - both outputs are written in binary format.
+!!
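+!! For instance (an illustrative figure), a run on 512 processes creates
+!! 512 "vtk xml" files at each output time, against a single avs file.
+!!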
+!! \subsection io_todo Todo
+!! \todo
+!! -    implement hdf5 output:
+!!      -    at first, by creating one output file per mpi process
+!!      -    then allow the user to gather several processes into one output file
+!!           in order to limit the number of outputs, and initialize this to a
+!!           reasonable default value.
+!! -    implement hdf5 input in order to restart a computation from a given state.
+!!          -    define the specification: what is kept from the previous
+!!               computation?
+!!               -    obviously the input parameters, last field values, current time ...
+!!               -    ... but it is also possible to re-initialize memory to its last
+!!                    state
+!!          -    implement the corresponding output (more than the standard output)
+!!               and the corresponding input.
+!!          -    to deal with unexpected bugs, add minimal information to the standard
+!!               output and implement matching input routines so that a crashed
+!!               simulation can also be restarted.
+!! -    implement xml input
+!   
+!-----------------------------------------------------------
+
+!> @defgroup part Particle method
+!! Details about the particle method for scalar advection
+
+!> @defgroup output Input/Output procedures
+!! This group gathers all the IO tools, including the AVS IO and vtk xml output.
+
+!> @defgroup cart_structure Cartesian mpi topology and mesh
+!! This group gathers the modules devoted to the mpi topology and some of the
+!! associated mesh information.
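+
+!> For instance (a sketch only), the "parallel_io" module would be attached
+!! to the output group by placing, just above its declaration:
+!! \verbatim
+!! !> Parallel vtk xml writer.
+!! !! @ingroup output
+!! module parallel_io
+!! \endverbatim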
diff --git a/HySoP/src/Unstable/LEGI/doc/doxygen/images/datalayout.eps b/HySoP/src/Unstable/LEGI/doc/doxygen/images/datalayout.eps
new file mode 100644
index 0000000000000000000000000000000000000000..9a178fc06696c999ec5a594f0827fe06ccf22266
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/doc/doxygen/images/datalayout.eps
@@ -0,0 +1,264 @@
+%!PS-Adobe-2.0 EPSF-2.0
+%%Title: datalayout.fig
+%%Creator: fig2dev Version 3.2 Patchlevel 5
+%%CreationDate: Tue Jul 19 12:09:38 2011
+%%For: begou@thor (Patrick Begou)
+%%BoundingBox: 0 0 580 470
+%Magnification: 1.0000
+%%EndComments
+%%BeginProlog
+/$F2psDict 200 dict def
+$F2psDict begin
+$F2psDict /mtrx matrix put
+/col-1 {0 setgray} bind def
+/col0 {0.000 0.000 0.000 srgb} bind def
+/col1 {0.000 0.000 1.000 srgb} bind def
+/col2 {0.000 1.000 0.000 srgb} bind def
+/col3 {0.000 1.000 1.000 srgb} bind def
+/col4 {1.000 0.000 0.000 srgb} bind def
+/col5 {1.000 0.000 1.000 srgb} bind def
+/col6 {1.000 1.000 0.000 srgb} bind def
+/col7 {1.000 1.000 1.000 srgb} bind def
+/col8 {0.000 0.000 0.560 srgb} bind def
+/col9 {0.000 0.000 0.690 srgb} bind def
+/col10 {0.000 0.000 0.820 srgb} bind def
+/col11 {0.530 0.810 1.000 srgb} bind def
+/col12 {0.000 0.560 0.000 srgb} bind def
+/col13 {0.000 0.690 0.000 srgb} bind def
+/col14 {0.000 0.820 0.000 srgb} bind def
+/col15 {0.000 0.560 0.560 srgb} bind def
+/col16 {0.000 0.690 0.690 srgb} bind def
+/col17 {0.000 0.820 0.820 srgb} bind def
+/col18 {0.560 0.000 0.000 srgb} bind def
+/col19 {0.690 0.000 0.000 srgb} bind def
+/col20 {0.820 0.000 0.000 srgb} bind def
+/col21 {0.560 0.000 0.560 srgb} bind def
+/col22 {0.690 0.000 0.690 srgb} bind def
+/col23 {0.820 0.000 0.820 srgb} bind def
+/col24 {0.500 0.190 0.000 srgb} bind def
+/col25 {0.630 0.250 0.000 srgb} bind def
+/col26 {0.750 0.380 0.000 srgb} bind def
+/col27 {1.000 0.500 0.500 srgb} bind def
+/col28 {1.000 0.630 0.630 srgb} bind def
+/col29 {1.000 0.750 0.750 srgb} bind def
+/col30 {1.000 0.880 0.880 srgb} bind def
+/col31 {1.000 0.840 0.000 srgb} bind def
+
+end
+save
+newpath 0 470 moveto 0 0 lineto 580 0 lineto 580 470 lineto closepath clip newpath
+-27.6 496.8 translate
+1 -1 scale
+
+/cp {closepath} bind def
+/ef {eofill} bind def
+/gr {grestore} bind def
+/gs {gsave} bind def
+/sa {save} bind def
+/rs {restore} bind def
+/l {lineto} bind def
+/m {moveto} bind def
+/rm {rmoveto} bind def
+/n {newpath} bind def
+/s {stroke} bind def
+/sh {show} bind def
+/slc {setlinecap} bind def
+/slj {setlinejoin} bind def
+/slw {setlinewidth} bind def
+/srgb {setrgbcolor} bind def
+/rot {rotate} bind def
+/sc {scale} bind def
+/sd {setdash} bind def
+/ff {findfont} bind def
+/sf {setfont} bind def
+/scf {scalefont} bind def
+/sw {stringwidth} bind def
+/tr {translate} bind def
+/tnt {dup dup currentrgbcolor
+  4 -2 roll dup 1 exch sub 3 -1 roll mul add
+  4 -2 roll dup 1 exch sub 3 -1 roll mul add
+  4 -2 roll dup 1 exch sub 3 -1 roll mul add srgb}
+  bind def
+/shd {dup dup currentrgbcolor 4 -2 roll mul 4 -2 roll mul
+  4 -2 roll mul srgb} bind def
+/$F2psBegin {$F2psDict begin /$F2psEnteredState save def} def
+/$F2psEnd {$F2psEnteredState restore end} def
+
+$F2psBegin
+10 setmiterlimit
+0 slj 0 slc
+ 0.06299 0.06299 sc
+%%EndProlog
+%
+% Fig objects follow
+%
+% 
+% here starts figure with depth 50
+% Polyline
+0 slj
+0 slc
+7.500 slw
+n 4605 2070 m 4500 2070 4500 3315 105 arcto 4 {pop} repeat
+  4500 3420 7320 3420 105 arcto 4 {pop} repeat
+  7425 3420 7425 2175 105 arcto 4 {pop} repeat
+  7425 2070 4605 2070 105 arcto 4 {pop} repeat
+ cp gs col0 s gr 
+/Times-Bold ff 222.25 scf sf
+6030 2430 m
+gs 1 -1 sc (module maindatalayout) dup sw pop 2 div neg 0 rm  col0 sh gr
+/Times-Roman ff 190.50 scf sf
+5895 2925 m
+gs 1 -1 sc (Global parameters) dup sw pop 2 div neg 0 rm  col0 sh gr
+/Times-Roman ff 190.50 scf sf
+5895 3195 m
+gs 1 -1 sc (Global function) dup sw pop 2 div neg 0 rm  col0 sh gr
+% Polyline
+n 450 3150 m 3645 3150 l 3645 4500 l 450 4500 l
+ cp gs col0 s gr 
+/Times-BoldItalic ff 222.25 scf sf
+2025 3420 m
+gs 1 -1 sc (implementdatalayout.Fortran) dup sw pop 2 div neg 0 rm  col0 sh gr
+/Times-Italic ff 190.50 scf sf
+1935 3870 m
+gs 1 -1 sc (Module generic implementation) dup sw pop 2 div neg 0 rm  col0 sh gr
+/Times-Italic ff 190.50 scf sf
+1890 4185 m
+gs 1 -1 sc (with tags.) dup sw pop 2 div neg 0 rm  col0 sh gr
+% Polyline
+n 4830 450 m 4725 450 4725 1290 105 arcto 4 {pop} repeat
+  4725 1395 7140 1395 105 arcto 4 {pop} repeat
+  7245 1395 7245 555 105 arcto 4 {pop} repeat
+  7245 450 4830 450 105 arcto 4 {pop} repeat
+ cp gs col0 s gr 
+/Times-Bold ff 222.25 scf sf
+5940 720 m
+gs 1 -1 sc (module mpilayout) dup sw pop 2 div neg 0 rm  col0 sh gr
+% Polyline
+n 6810 4950 m 6705 4950 6705 6150 105 arcto 4 {pop} repeat
+  6705 6255 9525 6255 105 arcto 4 {pop} repeat
+  9630 6255 9630 5055 105 arcto 4 {pop} repeat
+  9630 4950 6810 4950 105 arcto 4 {pop} repeat
+ cp gs col0 s gr 
+/Times-Bold ff 222.25 scf sf
+8145 5220 m
+gs 1 -1 sc (module cmplxdatalayout) dup sw pop 2 div neg 0 rm  col0 sh gr
+/Times-Roman ff 190.50 scf sf
+8145 5625 m
+gs 1 -1 sc (Datalayout code and functions) dup sw pop 2 div neg 0 rm  col0 sh gr
+/Times-Roman ff 190.50 scf sf
+8055 5850 m
+gs 1 -1 sc (for complex type values.) dup sw pop 2 div neg 0 rm  col0 sh gr
+% Polyline
+n 4920 6975 m 4815 6975 4815 7770 105 arcto 4 {pop} repeat
+  4815 7875 7815 7875 105 arcto 4 {pop} repeat
+  7920 7875 7920 7080 105 arcto 4 {pop} repeat
+  7920 6975 4920 6975 105 arcto 4 {pop} repeat
+ cp gs col0 s gr 
+/Times-Bold ff 222.25 scf sf
+6435 7290 m
+gs 1 -1 sc (module datalayout) dup sw pop 2 div neg 0 rm  col0 sh gr
+/Times-Roman ff 190.50 scf sf
+6435 7695 m
+gs 1 -1 sc (Generic interface for datalayout) dup sw pop 2 div neg 0 rm  col0 sh gr
+% Polyline
+30.000 slw
+gs  clippath
+5962 1674 m 5962 1380 l 5827 1380 l 5827 1674 l 5827 1674 l 5895 1449 l 5962 1674 l cp
+eoclip
+n 5895 2070 m
+ 5895 1395 l gs col0 s gr gr
+
+% arrowhead
+n 5962 1674 m 5895 1449 l 5827 1674 l 5895 1629 l 5962 1674 l 
+ cp gs 0.00 setgray ef gr  col0 s
+% Polyline
+gs  clippath
+6340 3531 m 6103 3356 l 6022 3465 l 6259 3640 l 6259 3640 l 6119 3452 l 6340 3531 l cp
+eoclip
+n 8145 4950 m
+ 6075 3420 l gs col0 s gr gr
+
+% arrowhead
+n 6340 3531 m 6119 3452 l 6259 3640 l 6263 3558 l 6340 3531 l 
+ cp gs 0.00 setgray ef gr  col0 s
+% Polyline
+gs  clippath
+5695 3685 m 5868 3447 l 5759 3368 l 5586 3606 l 5586 3606 l 5773 3464 l 5695 3685 l cp
+eoclip
+n 4725 4905 m
+ 5805 3420 l gs col0 s gr gr
+
+% arrowhead
+n 5695 3685 m 5773 3464 l 5586 3606 l 5667 3609 l 5695 3685 l 
+ cp gs 0.00 setgray ef gr  col0 s
+% Polyline
+gs  clippath
+5143 6351 m 4873 6232 l 4819 6355 l 5088 6474 l 5088 6474 l 4910 6322 l 5143 6351 l cp
+eoclip
+n 6390 6975 m
+ 4860 6300 l gs col0 s gr gr
+
+% arrowhead
+n 5143 6351 m 4910 6322 l 5088 6474 l 5074 6394 l 5143 6351 l 
+ cp gs 0.00 setgray ef gr  col0 s
+% Polyline
+gs  clippath
+7868 6425 m 8140 6311 l 8087 6186 l 7816 6301 l 7816 6301 l 8050 6276 l 7868 6425 l cp
+eoclip
+n 6390 6975 m
+ 8100 6255 l gs col0 s gr gr
+
+% arrowhead
+n 7868 6425 m 8050 6276 l 7816 6301 l 7884 6345 l 7868 6425 l 
+ cp gs 0.00 setgray ef gr  col0 s
+% Polyline
+ [120] 0 sd
+gs  clippath
+4221 4764 m 4364 4957 l 4473 4876 l 4330 4684 l 4330 4684 l 4383 4869 l 4221 4764 l cp
+eoclip
+n 3645 3870 m
+ 4410 4905 l gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 4221 4764 m 4383 4869 l 4330 4684 l 4221 4764 l  cp gs col7 1.00 shd ef gr  col0 s
+% Polyline
+ [120] 0 sd
+gs  clippath
+7460 4957 m 7692 5019 l 7726 4888 l 7495 4827 l 7495 4827 l 7652 4939 l 7460 4957 l cp
+eoclip
+n 3645 3870 m
+ 7695 4950 l gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 7460 4957 m 7652 4939 l 7495 4827 l 7460 4957 l  cp gs col7 1.00 shd ef gr  col0 s
+% Polyline
+7.500 slw
+n 3480 4905 m 3375 4905 3375 6195 105 arcto 4 {pop} repeat
+  3375 6300 6195 6300 105 arcto 4 {pop} repeat
+  6300 6300 6300 5010 105 arcto 4 {pop} repeat
+  6300 4905 3480 4905 105 arcto 4 {pop} repeat
+ cp gs col0 s gr 
+/Times-Roman ff 190.50 scf sf
+6390 4500 m
+gs 1 -1 sc  344.0 rot (automatic code generation) dup sw pop 2 div neg 0 rm  col0 sh gr
+/Times-Roman ff 190.50 scf sf
+4860 4635 m
+gs 1 -1 sc  52.0 rot (use) dup sw pop 2 div neg 0 rm  col0 sh gr
+/Times-Roman ff 190.50 scf sf
+7240 4182 m
+gs 1 -1 sc  328.0 rot (use) dup sw pop 2 div neg 0 rm  col0 sh gr
+/Times-Roman ff 190.50 scf sf
+4725 5580 m
+gs 1 -1 sc (Datalayout code and functions) dup sw pop 2 div neg 0 rm  col0 sh gr
+/Times-Roman ff 190.50 scf sf
+4680 5850 m
+gs 1 -1 sc (for real type values.) dup sw pop 2 div neg 0 rm  col0 sh gr
+/Times-Bold ff 222.25 scf sf
+4815 5175 m
+gs 1 -1 sc (module realdatalayout) dup sw pop 2 div neg 0 rm  col0 sh gr
+% here ends figure;
+$F2psEnd
+rs
+showpage
+%%Trailer
+%EOF
diff --git a/HySoP/src/Unstable/LEGI/doc/doxygen/images/datalayout.fig b/HySoP/src/Unstable/LEGI/doc/doxygen/images/datalayout.fig
new file mode 100644
index 0000000000000000000000000000000000000000..474af97ddc56a63235fa3e567cb923ac32061870
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/doc/doxygen/images/datalayout.fig
@@ -0,0 +1,70 @@
+#FIG 3.2  Produced by xfig version 3.2.5
+Landscape
+Center
+Metric
+A4      
+100.00
+Single
+-2
+1200 2
+6 4500 2070 7425 3420
+2 4 0 1 0 7 50 -1 -1 0.000 0 0 7 0 0 5
+	 7425 3420 7425 2070 4500 2070 4500 3420 7425 3420
+4 1 0 50 -1 2 14 0.0000 4 210 2400 6030 2430 module maindatalayout\001
+4 1 0 50 -1 0 12 0.0000 4 180 1470 5895 2925 Global parameters\001
+4 1 0 50 -1 0 12 0.0000 4 135 1290 5895 3195 Global function\001
+-6
+6 450 3150 3645 4500
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 450 3150 3645 3150 3645 4500 450 4500 450 3150
+4 1 0 50 -1 3 14 0.0000 4 210 2865 2025 3420 implementdatalayout.Fortran\001
+4 1 0 50 -1 1 12 0.0000 4 180 2610 1935 3870 Module generic implementation\001
+4 1 0 50 -1 1 12 0.0000 4 180 795 1890 4185 with tags.\001
+-6
+6 4725 450 7245 1395
+2 4 0 1 0 7 50 -1 -1 0.000 0 0 7 0 0 5
+	 7245 1395 7245 450 4725 450 4725 1395 7245 1395
+4 1 0 50 -1 2 14 0.0000 4 210 1830 5940 720 module mpilayout\001
+-6
+6 6705 4950 9630 6255
+2 4 0 1 0 7 50 -1 -1 0.000 0 0 7 0 0 5
+	 9630 6255 9630 4950 6705 4950 6705 6255 9630 6255
+4 1 0 50 -1 2 14 0.0000 4 210 2505 8145 5220 module cmplxdatalayout\001
+4 1 0 50 -1 0 12 0.0000 4 180 2490 8145 5625 Datalayout code and functions\001
+4 1 0 50 -1 0 12 0.0000 4 180 1995 8055 5850 for complex type values.\001
+-6
+6 4815 6975 7920 7875
+2 4 0 1 0 7 50 -1 -1 0.000 0 0 7 0 0 5
+	 7920 7875 7920 6975 4815 6975 4815 7875 7920 7875
+4 1 0 50 -1 2 14 0.0000 4 210 1890 6435 7290 module datalayout\001
+4 1 0 50 -1 0 12 0.0000 4 180 2565 6435 7695 Generic interface for datalayout\001
+-6
+2 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+	2 1 3.00 135.00 180.00
+	 5895 2070 5895 1395
+2 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+	2 1 3.00 135.00 180.00
+	 8145 4950 6075 3420
+2 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+	2 1 3.00 135.00 180.00
+	 4725 4905 5805 3420
+2 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+	2 1 3.00 135.00 180.00
+	 6390 6975 4860 6300
+2 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+	2 1 3.00 135.00 180.00
+	 6390 6975 8100 6255
+2 1 1 3 0 7 50 -1 -1 8.000 0 0 -1 1 0 2
+	1 0 3.00 135.00 180.00
+	 3645 3870 4410 4905
+2 1 1 3 0 7 50 -1 -1 8.000 0 0 -1 1 0 2
+	1 0 3.00 135.00 180.00
+	 3645 3870 7695 4950
+2 4 0 1 0 7 50 -1 -1 0.000 0 0 7 0 0 5
+	 6300 6300 6300 4905 3375 4905 3375 6300 6300 6300
+4 1 0 50 -1 0 12 6.0039 4 180 2160 6390 4500 automatic code generation\001
+4 1 0 50 -1 0 12 0.9076 4 90 270 4860 4635 use\001
+4 1 0 50 -1 0 12 5.7247 4 90 270 7240 4182 use\001
+4 1 0 50 -1 0 12 0.0000 4 180 2490 4725 5580 Datalayout code and functions\001
+4 1 0 50 -1 0 12 0.0000 4 180 1590 4680 5850 for real type values.\001
+4 1 0 50 -1 2 14 0.0000 4 210 2280 4815 5175 module realdatalayout\001
diff --git a/HySoP/src/Unstable/LEGI/doc/doxygen/images/datalayout.png b/HySoP/src/Unstable/LEGI/doc/doxygen/images/datalayout.png
new file mode 100644
index 0000000000000000000000000000000000000000..185db320b4302bf7add1f420f86560eea5cb059b
Binary files /dev/null and b/HySoP/src/Unstable/LEGI/doc/doxygen/images/datalayout.png differ
diff --git a/HySoP/src/Unstable/LEGI/doc/doxygen/images/dealias.eps b/HySoP/src/Unstable/LEGI/doc/doxygen/images/dealias.eps
new file mode 100644
index 0000000000000000000000000000000000000000..87459da4971c774ea290e27dcca53b7b615b34d5
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/doc/doxygen/images/dealias.eps
@@ -0,0 +1,3766 @@
+%!PS-Adobe-3.0 EPSF-3.0
+%%BoundingBox: (atend)
+%%LanguageLevel: 2
+%%Creator: Grace-5.1.21
+%%CreationDate: Thu Jan 26 09:34:17 2012
+%%DocumentData: Clean8Bit
+%%Orientation: Portrait
+%%Title: Untitled
+%%For: begou
+%%DocumentNeededResources: (atend)
+%%EndComments
+%%BeginProlog
+/m {moveto} def
+/l {lineto} def
+/s {stroke} def
+/n {newpath} def
+/c {closepath} def
+/RL {rlineto} def
+/SLW {setlinewidth} def
+/GS {gsave} def
+/GR {grestore} def
+/SC {setcolor} def
+/SGRY {setgray} def
+/SRGB {setrgbcolor} def
+/SD {setdash} def
+/SLC {setlinecap} def
+/SLJ {setlinejoin} def
+/SCS {setcolorspace} def
+/FFSF {findfont setfont} def
+/CC {concat} def
+/PXL {n m 0 0 RL s} def
+/Color0 {1.0000 1.0000 1.0000} def
+/Color1 {0.0000 0.0000 0.0000} def
+/Color2 {1.0000 0.0000 0.0000} def
+/Color3 {0.0000 1.0000 0.0000} def
+/Color4 {0.0000 0.0000 1.0000} def
+/Color5 {1.0000 1.0000 0.0000} def
+/Color6 {0.7373 0.5608 0.5608} def
+/Color7 {0.8627 0.8627 0.8627} def
+/Color8 {0.5804 0.0000 0.8275} def
+/Color9 {0.0000 1.0000 1.0000} def
+/Color10 {1.0000 0.0000 1.0000} def
+/Color11 {1.0000 0.6471 0.0000} def
+/Color12 {0.4471 0.1294 0.7373} def
+/Color13 {0.4039 0.0275 0.2824} def
+/Color14 {0.2510 0.8784 0.8157} def
+/Color15 {0.0000 0.5451 0.0000} def
+/Color16 {0.7529 0.7529 0.7529} def
+/Color17 {0.5059 0.5059 0.5059} def
+/Color18 {0.2588 0.2588 0.2588} def
+/PTRN {
+ /pat_bits exch def 
+ <<
+  /PaintType 2
+  /PatternType 1 /TilingType 1
+  /BBox[0 0 16 16]
+  /XStep 16 /YStep 16
+  /PaintProc {
+   pop
+   16 16 true [-1 0 0 -1 16 16] pat_bits imagemask
+  }
+ >>
+ [0.0017 0 0 0.0017 0 0]
+ makepattern
+} def
+% [32 predefined 16x16 fill-pattern bitmasks omitted]
+/ellipsedict 8 dict def
+ellipsedict /mtrx matrix put
+/EARC {
+ ellipsedict begin
+  /endangle exch def
+  /startangle exch def
+  /yrad exch def
+  /xrad exch def
+  /y exch def
+  /x exch def
+  /savematrix mtrx currentmatrix def
+  x y translate
+  xrad yrad scale
+  0 0 1 startangle endangle arc
+  savematrix setmatrix
+ end
+} def
+/TL {
+  /kcomp exch def
+  /linewidth exch def
+  /offset exch def
+  GS
+  0 offset rmoveto
+  linewidth SLW
+  dup stringwidth exch kcomp add exch RL s
+  GR
+} def
+/KINIT
+{
+ /kvector exch def
+ /kid 0 def
+} def
+/KPROC
+{
+ pop pop
+ kvector kid get
+ 0 rmoveto
+ /kid 1 kid add def
+} def
+/DefEncoding [
+ % [256 Latin-1 glyph names omitted]
+] def
+%%EndProlog
+%%BeginSetup
+%%EndSetup
+594.96 594.96 scale
+n
+0.0000 0.0000 m
+0.0000 1.0000 l
+1.4151 1.0000 l
+1.4151 0.0000 l
+c
+[/DeviceRGB] SCS
+Color0 SC
+fill
+[/DeviceRGB] SCS
+Color2 SC
+[] 0 SD
+0.0015 SLW
+0 SLC
+0 SLJ
+% [sampled plot data omitted: ~1600 short line segments tracing the two
+%  curves of the figure]
+0.5413 0.4725 l
+0.5420 0.4726 l
+0.5427 0.4727 l
+0.5433 0.4728 l
+0.5440 0.4729 l
+0.5447 0.4730 l
+0.5453 0.4731 l
+0.5460 0.4732 l
+0.5467 0.4733 l
+0.5473 0.4734 l
+0.5480 0.4735 l
+0.5487 0.4736 l
+0.5493 0.4737 l
+0.5500 0.4738 l
+0.5507 0.4739 l
+0.5513 0.4740 l
+0.5520 0.4740 l
+0.5527 0.4741 l
+0.5533 0.4742 l
+0.5540 0.4743 l
+0.5547 0.4744 l
+0.5553 0.4745 l
+0.5560 0.4746 l
+0.5567 0.4747 l
+0.5573 0.4748 l
+0.5580 0.4749 l
+0.5587 0.4750 l
+0.5593 0.4751 l
+0.5600 0.4751 l
+0.5607 0.4752 l
+0.5613 0.4753 l
+0.5620 0.4754 l
+0.5627 0.4755 l
+0.5633 0.4756 l
+0.5640 0.4757 l
+0.5647 0.4758 l
+0.5653 0.4758 l
+0.5660 0.4759 l
+0.5667 0.4760 l
+0.5673 0.4761 l
+0.5680 0.4762 l
+0.5687 0.4763 l
+0.5693 0.4764 l
+0.5700 0.4764 l
+0.5707 0.4765 l
+0.5713 0.4766 l
+0.5720 0.4767 l
+0.5727 0.4768 l
+0.5733 0.4769 l
+0.5740 0.4770 l
+0.5747 0.4770 l
+0.5753 0.4771 l
+0.5760 0.4772 l
+0.5767 0.4773 l
+0.5773 0.4774 l
+0.5780 0.4775 l
+0.5787 0.4775 l
+0.5793 0.4776 l
+0.5800 0.4777 l
+0.5807 0.4778 l
+0.5813 0.4779 l
+0.5820 0.4780 l
+0.5827 0.4780 l
+0.5833 0.4781 l
+0.5840 0.4782 l
+0.5847 0.4783 l
+0.5853 0.4784 l
+0.5860 0.4785 l
+0.5867 0.4785 l
+0.5873 0.4786 l
+0.5880 0.4787 l
+0.5887 0.4788 l
+0.5893 0.4789 l
+0.5900 0.4789 l
+0.5907 0.4790 l
+0.5913 0.4791 l
+0.5920 0.4792 l
+0.5927 0.4793 l
+0.5933 0.4794 l
+0.5940 0.4794 l
+0.5947 0.4795 l
+0.5953 0.4796 l
+0.5960 0.4797 l
+0.5967 0.4798 l
+0.5973 0.4798 l
+0.5980 0.4799 l
+0.5987 0.4800 l
+0.5993 0.4801 l
+0.6000 0.4802 l
+0.6007 0.4802 l
+0.6013 0.4803 l
+0.6020 0.4804 l
+0.6027 0.4805 l
+0.6033 0.4805 l
+0.6040 0.4806 l
+0.6047 0.4807 l
+0.6053 0.4808 l
+0.6060 0.4809 l
+0.6067 0.4809 l
+0.6073 0.4810 l
+0.6080 0.4811 l
+0.6087 0.4812 l
+0.6093 0.4812 l
+0.6100 0.4813 l
+0.6107 0.4814 l
+0.6113 0.4815 l
+0.6120 0.4815 l
+0.6127 0.4816 l
+0.6133 0.4817 l
+0.6140 0.4818 l
+0.6147 0.4818 l
+0.6153 0.4819 l
+0.6160 0.4820 l
+0.6167 0.4821 l
+0.6173 0.4821 l
+0.6180 0.4822 l
+0.6187 0.4823 l
+0.6193 0.4824 l
+0.6200 0.4824 l
+0.6207 0.4825 l
+0.6213 0.4826 l
+0.6220 0.4826 l
+0.6227 0.4827 l
+0.6233 0.4828 l
+0.6240 0.4829 l
+0.6247 0.4829 l
+0.6253 0.4830 l
+0.6260 0.4831 l
+0.6267 0.4831 l
+0.6273 0.4832 l
+0.6280 0.4833 l
+0.6287 0.4834 l
+0.6293 0.4834 l
+0.6300 0.4835 l
+0.6307 0.4836 l
+0.6313 0.4836 l
+0.6320 0.4837 l
+0.6327 0.4838 l
+0.6333 0.4839 l
+0.6340 0.4839 l
+0.6347 0.4840 l
+0.6353 0.4841 l
+0.6360 0.4841 l
+0.6367 0.4842 l
+0.6373 0.4843 l
+0.6380 0.4843 l
+0.6387 0.4844 l
+0.6393 0.4845 l
+0.6400 0.4845 l
+0.6407 0.4846 l
+0.6413 0.4847 l
+0.6420 0.4847 l
+0.6427 0.4848 l
+0.6433 0.4849 l
+0.6440 0.4849 l
+0.6447 0.4850 l
+0.6453 0.4851 l
+0.6460 0.4851 l
+0.6467 0.4852 l
+0.6473 0.4853 l
+0.6480 0.4853 l
+0.6487 0.4854 l
+0.6493 0.4855 l
+0.6500 0.4855 l
+0.6507 0.4856 l
+0.6513 0.4857 l
+0.6520 0.4857 l
+0.6527 0.4858 l
+0.6533 0.4858 l
+0.6540 0.4859 l
+0.6547 0.4860 l
+0.6553 0.4860 l
+0.6560 0.4861 l
+0.6567 0.4862 l
+0.6573 0.4862 l
+0.6580 0.4863 l
+0.6587 0.4863 l
+0.6593 0.4864 l
+0.6600 0.4865 l
+0.6607 0.4865 l
+0.6613 0.4866 l
+0.6620 0.4867 l
+0.6627 0.4867 l
+0.6633 0.4868 l
+0.6640 0.4868 l
+0.6647 0.4869 l
+0.6653 0.4870 l
+0.6660 0.4870 l
+0.6667 0.4871 l
+0.6673 0.4871 l
+0.6680 0.4872 l
+0.6687 0.4873 l
+0.6693 0.4873 l
+0.6700 0.4874 l
+0.6707 0.4874 l
+0.6713 0.4875 l
+0.6720 0.4875 l
+0.6727 0.4876 l
+0.6733 0.4877 l
+0.6740 0.4877 l
+0.6747 0.4878 l
+0.6753 0.4878 l
+0.6760 0.4879 l
+0.6767 0.4880 l
+0.6773 0.4880 l
+0.6780 0.4881 l
+0.6787 0.4881 l
+0.6793 0.4882 l
+0.6800 0.4882 l
+0.6807 0.4883 l
+0.6813 0.4883 l
+0.6820 0.4884 l
+0.6827 0.4885 l
+0.6833 0.4885 l
+0.6840 0.4886 l
+0.6847 0.4886 l
+0.6853 0.4887 l
+0.6860 0.4887 l
+0.6867 0.4888 l
+0.6873 0.4888 l
+0.6880 0.4889 l
+0.6887 0.4889 l
+0.6893 0.4890 l
+0.6900 0.4891 l
+0.6907 0.4891 l
+0.6913 0.4892 l
+0.6920 0.4892 l
+0.6927 0.4893 l
+0.6933 0.4893 l
+0.6940 0.4894 l
+0.6947 0.4894 l
+0.6953 0.4895 l
+0.6960 0.4895 l
+0.6967 0.4896 l
+0.6973 0.4896 l
+0.6980 0.4897 l
+0.6987 0.4897 l
+0.6993 0.4898 l
+0.7000 0.4898 l
+0.7007 0.4899 l
+0.7013 0.4899 l
+0.7020 0.4900 l
+0.7027 0.4900 l
+0.7033 0.4901 l
+0.7040 0.4901 l
+0.7047 0.4902 l
+0.7053 0.4902 l
+0.7060 0.4903 l
+0.7067 0.4903 l
+0.7073 0.4904 l
+0.7080 0.4904 l
+0.7087 0.4905 l
+0.7093 0.4905 l
+0.7100 0.4905 l
+0.7107 0.4906 l
+0.7113 0.4906 l
+0.7120 0.4907 l
+0.7127 0.4907 l
+0.7133 0.4908 l
+0.7140 0.4908 l
+0.7147 0.4909 l
+0.7153 0.4909 l
+0.7160 0.4910 l
+0.7167 0.4910 l
+0.7173 0.4911 l
+0.7180 0.4911 l
+0.7187 0.4911 l
+0.7193 0.4912 l
+0.7200 0.4912 l
+0.7207 0.4913 l
+0.7213 0.4913 l
+0.7220 0.4914 l
+0.7227 0.4914 l
+0.7233 0.4915 l
+0.7240 0.4915 l
+0.7247 0.4915 l
+0.7253 0.4916 l
+0.7260 0.4916 l
+0.7267 0.4917 l
+0.7273 0.4917 l
+0.7280 0.4918 l
+0.7287 0.4918 l
+0.7293 0.4918 l
+0.7300 0.4919 l
+0.7307 0.4919 l
+0.7313 0.4920 l
+0.7320 0.4920 l
+0.7327 0.4920 l
+0.7333 0.4921 l
+0.7340 0.4921 l
+0.7347 0.4922 l
+0.7353 0.4922 l
+0.7360 0.4922 l
+0.7367 0.4923 l
+0.7373 0.4923 l
+0.7380 0.4924 l
+0.7387 0.4924 l
+0.7393 0.4924 l
+0.7400 0.4925 l
+0.7407 0.4925 l
+0.7413 0.4926 l
+0.7420 0.4926 l
+0.7427 0.4926 l
+0.7433 0.4927 l
+0.7440 0.4927 l
+0.7447 0.4927 l
+0.7453 0.4928 l
+0.7460 0.4928 l
+0.7467 0.4929 l
+0.7473 0.4929 l
+0.7480 0.4929 l
+0.7487 0.4930 l
+0.7493 0.4930 l
+0.7500 0.4930 l
+0.7507 0.4931 l
+0.7513 0.4931 l
+0.7520 0.4931 l
+0.7527 0.4932 l
+0.7533 0.4932 l
+0.7540 0.4933 l
+0.7547 0.4933 l
+0.7553 0.4933 l
+0.7560 0.4934 l
+0.7567 0.4934 l
+0.7573 0.4934 l
+0.7580 0.4935 l
+0.7587 0.4935 l
+0.7593 0.4935 l
+0.7600 0.4936 l
+0.7607 0.4936 l
+0.7613 0.4936 l
+0.7620 0.4937 l
+0.7627 0.4937 l
+0.7633 0.4937 l
+0.7640 0.4938 l
+0.7647 0.4938 l
+0.7653 0.4938 l
+0.7660 0.4938 l
+0.7667 0.4939 l
+0.7673 0.4939 l
+0.7680 0.4939 l
+0.7687 0.4940 l
+0.7693 0.4940 l
+0.7700 0.4940 l
+0.7707 0.4941 l
+0.7713 0.4941 l
+0.7720 0.4941 l
+0.7727 0.4942 l
+0.7733 0.4942 l
+0.7740 0.4942 l
+0.7747 0.4942 l
+0.7753 0.4943 l
+0.7760 0.4943 l
+0.7767 0.4943 l
+0.7773 0.4944 l
+0.7780 0.4944 l
+0.7787 0.4944 l
+0.7793 0.4944 l
+0.7800 0.4945 l
+0.7807 0.4945 l
+0.7813 0.4945 l
+0.7820 0.4945 l
+0.7827 0.4946 l
+0.7833 0.4946 l
+0.7840 0.4946 l
+0.7847 0.4947 l
+0.7853 0.4947 l
+0.7860 0.4947 l
+0.7867 0.4947 l
+0.7873 0.4948 l
+0.7880 0.4948 l
+0.7887 0.4948 l
+0.7893 0.4948 l
+0.7900 0.4949 l
+0.7907 0.4949 l
+0.7913 0.4949 l
+0.7920 0.4949 l
+0.7927 0.4950 l
+0.7933 0.4950 l
+0.7940 0.4950 l
+0.7947 0.4950 l
+0.7953 0.4951 l
+0.7960 0.4951 l
+0.7967 0.4951 l
+0.7973 0.4951 l
+0.7980 0.4951 l
+0.7987 0.4952 l
+0.7993 0.4952 l
+0.8000 0.4952 l
+0.8007 0.4952 l
+0.8013 0.4953 l
+0.8020 0.4953 l
+0.8027 0.4953 l
+0.8033 0.4953 l
+0.8040 0.4953 l
+0.8047 0.4954 l
+0.8053 0.4954 l
+0.8060 0.4954 l
+0.8067 0.4954 l
+0.8073 0.4955 l
+0.8080 0.4955 l
+0.8087 0.4955 l
+0.8093 0.4955 l
+0.8100 0.4955 l
+0.8107 0.4956 l
+0.8113 0.4956 l
+0.8120 0.4956 l
+0.8127 0.4956 l
+0.8133 0.4956 l
+0.8140 0.4956 l
+0.8147 0.4957 l
+0.8153 0.4957 l
+0.8160 0.4957 l
+s
+[/DeviceRGB] SCS
+Color1 SC
+n
+0.1500 0.4974 m
+0.1567 0.4974 l
+0.1633 0.4974 l
+0.1700 0.4974 l
+0.1767 0.4974 l
+0.1833 0.4974 l
+0.1900 0.4974 l
+0.1967 0.4974 l
+0.2033 0.4974 l
+0.2100 0.4974 l
+0.2167 0.4974 l
+0.2233 0.4974 l
+0.2300 0.4974 l
+0.2367 0.4974 l
+0.2433 0.4974 l
+0.2500 0.4974 l
+0.2567 0.4974 l
+0.2633 0.4974 l
+0.2700 0.4974 l
+0.2767 0.4974 l
+0.2833 0.4974 l
+0.2900 0.4974 l
+0.2967 0.4974 l
+0.3033 0.4974 l
+0.3100 0.4974 l
+0.3167 0.4974 l
+0.3233 0.4974 l
+0.3300 0.4974 l
+0.3367 0.4974 l
+0.3433 0.4974 l
+0.3500 0.4974 l
+0.3567 0.4974 l
+0.3633 0.4974 l
+0.3700 0.4974 l
+0.3767 0.4974 l
+0.3833 0.4974 l
+0.3900 0.4974 l
+0.3967 0.4974 l
+0.4033 0.4974 l
+0.4100 0.4974 l
+0.4167 0.4974 l
+0.4233 0.4974 l
+0.4300 0.4974 l
+0.4367 0.4974 l
+0.4433 0.4974 l
+0.4500 0.4974 l
+0.4567 0.4974 l
+0.4633 0.4974 l
+0.4700 0.4974 l
+0.4767 0.4974 l
+0.4833 0.4974 l
+0.4900 0.4974 l
+0.4967 0.4974 l
+0.5033 0.4974 l
+0.5100 0.4974 l
+0.5167 0.4974 l
+0.5233 0.4974 l
+0.5300 0.4974 l
+0.5367 0.4974 l
+0.5433 0.4974 l
+0.5500 0.4974 l
+0.5567 0.4974 l
+0.5633 0.4974 l
+0.5700 0.4974 l
+0.5767 0.4974 l
+0.5833 0.4974 l
+0.5900 0.4974 l
+0.5967 0.4974 l
+0.6033 0.4974 l
+0.6100 0.4974 l
+0.6167 0.4974 l
+0.6233 0.4974 l
+0.6300 0.4974 l
+0.6367 0.4974 l
+0.6433 0.4974 l
+0.6500 0.4974 l
+0.6567 0.4974 l
+0.6633 0.4974 l
+0.6700 0.4974 l
+0.6767 0.4974 l
+0.6833 0.4974 l
+0.6900 0.4974 l
+0.6967 0.4974 l
+0.7033 0.4974 l
+0.7100 0.4974 l
+0.7167 0.4974 l
+0.7233 0.4974 l
+0.7300 0.4974 l
+0.7367 0.4974 l
+0.7433 0.4974 l
+0.7500 0.4974 l
+0.7567 0.4974 l
+0.7633 0.4974 l
+0.7700 0.4974 l
+0.7767 0.4974 l
+0.7833 0.4974 l
+0.7900 0.4974 l
+0.7967 0.4974 l
+0.8033 0.4974 l
+0.8100 0.4974 l
+0.8167 0.4974 l
+0.8233 0.4974 l
+0.8300 0.4974 l
+0.8367 0.4974 l
+0.8433 0.4974 l
+0.8500 0.4974 l
+0.8567 0.4974 l
+0.8633 0.4974 l
+0.8700 0.4974 l
+0.8767 0.4974 l
+0.8833 0.4974 l
+0.8900 0.4974 l
+0.8967 0.4974 l
+0.9033 0.4974 l
+0.9100 0.4974 l
+0.9167 0.4974 l
+0.9233 0.4974 l
+0.9300 0.4974 l
+0.9367 0.4974 l
+0.9433 0.4974 l
+0.9500 0.4974 l
+0.9567 0.4974 l
+0.9633 0.4974 l
+0.9700 0.4974 l
+0.9767 0.4974 l
+0.9833 0.4974 l
+0.9900 0.4974 l
+0.9967 0.4974 l
+1.0033 0.4974 l
+1.0100 0.4974 l
+1.0167 0.4974 l
+1.0233 0.4974 l
+1.0300 0.4974 l
+1.0367 0.4974 l
+1.0433 0.4974 l
+1.0500 0.4974 l
+1.0567 0.4974 l
+1.0633 0.4974 l
+1.0700 0.4974 l
+1.0767 0.4974 l
+1.0833 0.4974 l
+1.0900 0.4974 l
+1.0967 0.4974 l
+1.1033 0.4974 l
+1.1100 0.4974 l
+1.1167 0.4974 l
+1.1233 0.4974 l
+1.1300 0.4974 l
+1.1367 0.4974 l
+1.1433 0.4974 l
+1.1500 0.4974 l
+1.1501 0.4974 l
+s
+[/DeviceRGB] SCS
+Color2 SC
+n 0.1500 0.1925 0.0024 0.0024 0 360 EARC s
+n 0.1567 0.2031 0.0024 0.0024 0 360 EARC s
+n 0.1633 0.2139 0.0024 0.0024 0 360 EARC s
+n 0.1700 0.2235 0.0024 0.0024 0 360 EARC s
+n 0.1767 0.2308 0.0024 0.0024 0 360 EARC s
+n 0.1833 0.2383 0.0024 0.0024 0 360 EARC s
+n 0.1900 0.2463 0.0024 0.0024 0 360 EARC s
+n 0.1967 0.2500 0.0024 0.0024 0 360 EARC s
+n 0.2033 0.2540 0.0024 0.0024 0 360 EARC s
+n 0.2100 0.2580 0.0024 0.0024 0 360 EARC s
+n 0.2167 0.2623 0.0024 0.0024 0 360 EARC s
+n 0.2233 0.2666 0.0024 0.0024 0 360 EARC s
+n 0.2300 0.2704 0.0024 0.0024 0 360 EARC s
+n 0.2367 0.2752 0.0024 0.0024 0 360 EARC s
+n 0.2433 0.2803 0.0024 0.0024 0 360 EARC s
+n 0.2500 0.2851 0.0024 0.0024 0 360 EARC s
+n 0.2567 0.2902 0.0024 0.0024 0 360 EARC s
+n 0.2633 0.2956 0.0024 0.0024 0 360 EARC s
+n 0.2700 0.3013 0.0024 0.0024 0 360 EARC s
+n 0.2767 0.3072 0.0024 0.0024 0 360 EARC s
+n 0.2833 0.3134 0.0024 0.0024 0 360 EARC s
+n 0.2900 0.3198 0.0024 0.0024 0 360 EARC s
+n 0.2967 0.3265 0.0024 0.0024 0 360 EARC s
+n 0.3033 0.3334 0.0024 0.0024 0 360 EARC s
+n 0.3100 0.3405 0.0024 0.0024 0 360 EARC s
+n 0.3167 0.3476 0.0024 0.0024 0 360 EARC s
+n 0.3233 0.3548 0.0024 0.0024 0 360 EARC s
+n 0.3300 0.3618 0.0024 0.0024 0 360 EARC s
+n 0.3367 0.3687 0.0024 0.0024 0 360 EARC s
+n 0.3433 0.3753 0.0024 0.0024 0 360 EARC s
+n 0.3500 0.3817 0.0024 0.0024 0 360 EARC s
+n 0.3567 0.3879 0.0024 0.0024 0 360 EARC s
+n 0.3633 0.3938 0.0024 0.0024 0 360 EARC s
+n 0.3700 0.3993 0.0024 0.0024 0 360 EARC s
+n 0.3767 0.4045 0.0024 0.0024 0 360 EARC s
+n 0.3833 0.4094 0.0024 0.0024 0 360 EARC s
+n 0.3900 0.4141 0.0024 0.0024 0 360 EARC s
+n 0.3967 0.4186 0.0024 0.0024 0 360 EARC s
+n 0.4033 0.4228 0.0024 0.0024 0 360 EARC s
+n 0.4100 0.4269 0.0024 0.0024 0 360 EARC s
+n 0.4167 0.4308 0.0024 0.0024 0 360 EARC s
+n 0.4233 0.4345 0.0024 0.0024 0 360 EARC s
+n 0.4300 0.4381 0.0024 0.0024 0 360 EARC s
+n 0.4367 0.4415 0.0024 0.0024 0 360 EARC s
+n 0.4433 0.4447 0.0024 0.0024 0 360 EARC s
+n 0.4500 0.4477 0.0024 0.0024 0 360 EARC s
+n 0.4567 0.4505 0.0024 0.0024 0 360 EARC s
+n 0.4633 0.4530 0.0024 0.0024 0 360 EARC s
+n 0.4700 0.4554 0.0024 0.0024 0 360 EARC s
+n 0.4767 0.4577 0.0024 0.0024 0 360 EARC s
+n 0.4833 0.4598 0.0024 0.0024 0 360 EARC s
+n 0.4900 0.4617 0.0024 0.0024 0 360 EARC s
+n 0.4967 0.4635 0.0024 0.0024 0 360 EARC s
+n 0.5033 0.4652 0.0024 0.0024 0 360 EARC s
+n 0.5100 0.4668 0.0024 0.0024 0 360 EARC s
+n 0.5167 0.4683 0.0024 0.0024 0 360 EARC s
+n 0.5233 0.4695 0.0024 0.0024 0 360 EARC s
+n 0.5300 0.4706 0.0024 0.0024 0 360 EARC s
+n 0.5367 0.4717 0.0024 0.0024 0 360 EARC s
+n 0.5433 0.4727 0.0024 0.0024 0 360 EARC s
+n 0.5500 0.4737 0.0024 0.0024 0 360 EARC s
+n 0.5567 0.4746 0.0024 0.0024 0 360 EARC s
+n 0.5633 0.4755 0.0024 0.0024 0 360 EARC s
+n 0.5700 0.4764 0.0024 0.0024 0 360 EARC s
+n 0.5767 0.4772 0.0024 0.0024 0 360 EARC s
+n 0.5833 0.4780 0.0024 0.0024 0 360 EARC s
+n 0.5900 0.4789 0.0024 0.0024 0 360 EARC s
+n 0.5967 0.4797 0.0024 0.0024 0 360 EARC s
+n 0.6033 0.4805 0.0024 0.0024 0 360 EARC s
+n 0.6100 0.4812 0.0024 0.0024 0 360 EARC s
+n 0.6167 0.4820 0.0024 0.0024 0 360 EARC s
+n 0.6233 0.4827 0.0024 0.0024 0 360 EARC s
+n 0.6300 0.4834 0.0024 0.0024 0 360 EARC s
+n 0.6367 0.4841 0.0024 0.0024 0 360 EARC s
+n 0.6433 0.4848 0.0024 0.0024 0 360 EARC s
+n 0.6500 0.4855 0.0024 0.0024 0 360 EARC s
+n 0.6567 0.4861 0.0024 0.0024 0 360 EARC s
+n 0.6633 0.4867 0.0024 0.0024 0 360 EARC s
+n 0.6700 0.4873 0.0024 0.0024 0 360 EARC s
+n 0.6767 0.4879 0.0024 0.0024 0 360 EARC s
+n 0.6833 0.4885 0.0024 0.0024 0 360 EARC s
+n 0.6900 0.4890 0.0024 0.0024 0 360 EARC s
+n 0.6967 0.4895 0.0024 0.0024 0 360 EARC s
+n 0.7033 0.4900 0.0024 0.0024 0 360 EARC s
+n 0.7100 0.4905 0.0024 0.0024 0 360 EARC s
+n 0.7167 0.4910 0.0024 0.0024 0 360 EARC s
+n 0.7233 0.4914 0.0024 0.0024 0 360 EARC s
+n 0.7300 0.4918 0.0024 0.0024 0 360 EARC s
+n 0.7367 0.4922 0.0024 0.0024 0 360 EARC s
+n 0.7433 0.4926 0.0024 0.0024 0 360 EARC s
+n 0.7500 0.4930 0.0024 0.0024 0 360 EARC s
+n 0.7567 0.4934 0.0024 0.0024 0 360 EARC s
+n 0.7633 0.4937 0.0024 0.0024 0 360 EARC s
+n 0.7700 0.4940 0.0024 0.0024 0 360 EARC s
+n 0.7767 0.4943 0.0024 0.0024 0 360 EARC s
+n 0.7833 0.4946 0.0024 0.0024 0 360 EARC s
+n 0.7900 0.4948 0.0024 0.0024 0 360 EARC s
+n 0.7967 0.4951 0.0024 0.0024 0 360 EARC s
+n 0.8033 0.4953 0.0024 0.0024 0 360 EARC s
+n 0.8100 0.4955 0.0024 0.0024 0 360 EARC s
+n 0.8167 0.4957 0.0024 0.0024 0 360 EARC s
+n 0.8233 0.4959 0.0024 0.0024 0 360 EARC s
+n 0.8300 0.4960 0.0024 0.0024 0 360 EARC s
+n 0.8367 0.4962 0.0024 0.0024 0 360 EARC s
+n 0.8433 0.4963 0.0024 0.0024 0 360 EARC s
+n 0.8500 0.4965 0.0024 0.0024 0 360 EARC s
+n 0.8567 0.4966 0.0024 0.0024 0 360 EARC s
+n 0.8633 0.4967 0.0024 0.0024 0 360 EARC s
+n 0.8700 0.4968 0.0024 0.0024 0 360 EARC s
+n 0.8767 0.4968 0.0024 0.0024 0 360 EARC s
+n 0.8833 0.4969 0.0024 0.0024 0 360 EARC s
+n 0.8900 0.4970 0.0024 0.0024 0 360 EARC s
+n 0.8967 0.4970 0.0024 0.0024 0 360 EARC s
+n 0.9033 0.4971 0.0024 0.0024 0 360 EARC s
+n 0.9100 0.4971 0.0024 0.0024 0 360 EARC s
+n 0.9167 0.4972 0.0024 0.0024 0 360 EARC s
+n 0.9233 0.4972 0.0024 0.0024 0 360 EARC s
+n 0.9300 0.4972 0.0024 0.0024 0 360 EARC s
+n 0.9367 0.4973 0.0024 0.0024 0 360 EARC s
+n 0.9433 0.4973 0.0024 0.0024 0 360 EARC s
+n 0.9500 0.4973 0.0024 0.0024 0 360 EARC s
+n 0.9567 0.4973 0.0024 0.0024 0 360 EARC s
+n 0.9633 0.4973 0.0024 0.0024 0 360 EARC s
+n 0.9700 0.4974 0.0024 0.0024 0 360 EARC s
+n 0.9767 0.4974 0.0024 0.0024 0 360 EARC s
+n 0.9833 0.4974 0.0024 0.0024 0 360 EARC s
+n 0.9900 0.4974 0.0024 0.0024 0 360 EARC s
+n 0.9967 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0033 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0100 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0167 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0233 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0300 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0367 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0433 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0500 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0567 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0633 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0700 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0767 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0833 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0900 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0967 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1033 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1100 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1167 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1233 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1300 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1367 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1433 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1500 0.4974 0.0024 0.0024 0 360 EARC s
+n 0.1500 0.8138 0.0024 0.0024 0 360 EARC s
+n 0.1567 0.8063 0.0024 0.0024 0 360 EARC s
+n 0.1633 0.7983 0.0024 0.0024 0 360 EARC s
+n 0.1700 0.7896 0.0024 0.0024 0 360 EARC s
+n 0.1767 0.7806 0.0024 0.0024 0 360 EARC s
+n 0.1833 0.7709 0.0024 0.0024 0 360 EARC s
+n 0.1900 0.7611 0.0024 0.0024 0 360 EARC s
+n 0.1967 0.7505 0.0024 0.0024 0 360 EARC s
+n 0.2033 0.7456 0.0024 0.0024 0 360 EARC s
+n 0.2100 0.7419 0.0024 0.0024 0 360 EARC s
+n 0.2167 0.7388 0.0024 0.0024 0 360 EARC s
+n 0.2233 0.7366 0.0024 0.0024 0 360 EARC s
+n 0.2300 0.7344 0.0024 0.0024 0 360 EARC s
+n 0.2367 0.7318 0.0024 0.0024 0 360 EARC s
+n 0.2433 0.7287 0.0024 0.0024 0 360 EARC s
+n 0.2500 0.7249 0.0024 0.0024 0 360 EARC s
+n 0.2567 0.7206 0.0024 0.0024 0 360 EARC s
+n 0.2633 0.7152 0.0024 0.0024 0 360 EARC s
+n 0.2700 0.7092 0.0024 0.0024 0 360 EARC s
+n 0.2767 0.7021 0.0024 0.0024 0 360 EARC s
+n 0.2833 0.6943 0.0024 0.0024 0 360 EARC s
+n 0.2900 0.6855 0.0024 0.0024 0 360 EARC s
+n 0.2967 0.6760 0.0024 0.0024 0 360 EARC s
+n 0.3033 0.6663 0.0024 0.0024 0 360 EARC s
+n 0.3100 0.6566 0.0024 0.0024 0 360 EARC s
+n 0.3167 0.6470 0.0024 0.0024 0 360 EARC s
+n 0.3233 0.6380 0.0024 0.0024 0 360 EARC s
+n 0.3300 0.6293 0.0024 0.0024 0 360 EARC s
+n 0.3367 0.6211 0.0024 0.0024 0 360 EARC s
+n 0.3433 0.6135 0.0024 0.0024 0 360 EARC s
+n 0.3500 0.6062 0.0024 0.0024 0 360 EARC s
+n 0.3567 0.5993 0.0024 0.0024 0 360 EARC s
+n 0.3633 0.5929 0.0024 0.0024 0 360 EARC s
+n 0.3700 0.5870 0.0024 0.0024 0 360 EARC s
+n 0.3767 0.5816 0.0024 0.0024 0 360 EARC s
+n 0.3833 0.5766 0.0024 0.0024 0 360 EARC s
+n 0.3900 0.5719 0.0024 0.0024 0 360 EARC s
+n 0.3967 0.5678 0.0024 0.0024 0 360 EARC s
+n 0.4033 0.5640 0.0024 0.0024 0 360 EARC s
+n 0.4100 0.5606 0.0024 0.0024 0 360 EARC s
+n 0.4167 0.5574 0.0024 0.0024 0 360 EARC s
+n 0.4233 0.5544 0.0024 0.0024 0 360 EARC s
+n 0.4300 0.5516 0.0024 0.0024 0 360 EARC s
+n 0.4367 0.5489 0.0024 0.0024 0 360 EARC s
+n 0.4433 0.5463 0.0024 0.0024 0 360 EARC s
+n 0.4500 0.5439 0.0024 0.0024 0 360 EARC s
+n 0.4567 0.5416 0.0024 0.0024 0 360 EARC s
+n 0.4633 0.5394 0.0024 0.0024 0 360 EARC s
+n 0.4700 0.5373 0.0024 0.0024 0 360 EARC s
+n 0.4767 0.5354 0.0024 0.0024 0 360 EARC s
+n 0.4833 0.5335 0.0024 0.0024 0 360 EARC s
+n 0.4900 0.5317 0.0024 0.0024 0 360 EARC s
+n 0.4967 0.5300 0.0024 0.0024 0 360 EARC s
+n 0.5033 0.5284 0.0024 0.0024 0 360 EARC s
+n 0.5100 0.5270 0.0024 0.0024 0 360 EARC s
+n 0.5167 0.5258 0.0024 0.0024 0 360 EARC s
+n 0.5233 0.5246 0.0024 0.0024 0 360 EARC s
+n 0.5300 0.5235 0.0024 0.0024 0 360 EARC s
+n 0.5367 0.5224 0.0024 0.0024 0 360 EARC s
+n 0.5433 0.5214 0.0024 0.0024 0 360 EARC s
+n 0.5500 0.5204 0.0024 0.0024 0 360 EARC s
+n 0.5567 0.5194 0.0024 0.0024 0 360 EARC s
+n 0.5633 0.5184 0.0024 0.0024 0 360 EARC s
+n 0.5700 0.5175 0.0024 0.0024 0 360 EARC s
+n 0.5767 0.5166 0.0024 0.0024 0 360 EARC s
+n 0.5833 0.5157 0.0024 0.0024 0 360 EARC s
+n 0.5900 0.5149 0.0024 0.0024 0 360 EARC s
+n 0.5967 0.5142 0.0024 0.0024 0 360 EARC s
+n 0.6033 0.5135 0.0024 0.0024 0 360 EARC s
+n 0.6100 0.5128 0.0024 0.0024 0 360 EARC s
+n 0.6167 0.5121 0.0024 0.0024 0 360 EARC s
+n 0.6233 0.5114 0.0024 0.0024 0 360 EARC s
+n 0.6300 0.5107 0.0024 0.0024 0 360 EARC s
+n 0.6367 0.5101 0.0024 0.0024 0 360 EARC s
+n 0.6433 0.5094 0.0024 0.0024 0 360 EARC s
+n 0.6500 0.5088 0.0024 0.0024 0 360 EARC s
+n 0.6567 0.5082 0.0024 0.0024 0 360 EARC s
+n 0.6633 0.5076 0.0024 0.0024 0 360 EARC s
+n 0.6700 0.5070 0.0024 0.0024 0 360 EARC s
+n 0.6767 0.5064 0.0024 0.0024 0 360 EARC s
+n 0.6833 0.5058 0.0024 0.0024 0 360 EARC s
+n 0.6900 0.5053 0.0024 0.0024 0 360 EARC s
+n 0.6967 0.5048 0.0024 0.0024 0 360 EARC s
+n 0.7033 0.5043 0.0024 0.0024 0 360 EARC s
+n 0.7100 0.5038 0.0024 0.0024 0 360 EARC s
+n 0.7167 0.5033 0.0024 0.0024 0 360 EARC s
+n 0.7233 0.5029 0.0024 0.0024 0 360 EARC s
+n 0.7300 0.5025 0.0024 0.0024 0 360 EARC s
+n 0.7367 0.5021 0.0024 0.0024 0 360 EARC s
+n 0.7433 0.5017 0.0024 0.0024 0 360 EARC s
+n 0.7500 0.5013 0.0024 0.0024 0 360 EARC s
+n 0.7567 0.5010 0.0024 0.0024 0 360 EARC s
+n 0.7633 0.5007 0.0024 0.0024 0 360 EARC s
+n 0.7700 0.5004 0.0024 0.0024 0 360 EARC s
+n 0.7767 0.5002 0.0024 0.0024 0 360 EARC s
+n 0.7833 0.4999 0.0024 0.0024 0 360 EARC s
+n 0.7900 0.4997 0.0024 0.0024 0 360 EARC s
+n 0.7967 0.4995 0.0024 0.0024 0 360 EARC s
+n 0.8033 0.4993 0.0024 0.0024 0 360 EARC s
+n 0.8100 0.4991 0.0024 0.0024 0 360 EARC s
+n 0.8167 0.4990 0.0024 0.0024 0 360 EARC s
+n 0.8233 0.4988 0.0024 0.0024 0 360 EARC s
+n 0.8300 0.4987 0.0024 0.0024 0 360 EARC s
+n 0.8367 0.4986 0.0024 0.0024 0 360 EARC s
+n 0.8433 0.4984 0.0024 0.0024 0 360 EARC s
+n 0.8500 0.4983 0.0024 0.0024 0 360 EARC s
+n 0.8567 0.4982 0.0024 0.0024 0 360 EARC s
+n 0.8633 0.4981 0.0024 0.0024 0 360 EARC s
+n 0.8700 0.4981 0.0024 0.0024 0 360 EARC s
+n 0.8767 0.4980 0.0024 0.0024 0 360 EARC s
+n 0.8833 0.4979 0.0024 0.0024 0 360 EARC s
+n 0.8900 0.4978 0.0024 0.0024 0 360 EARC s
+n 0.8967 0.4978 0.0024 0.0024 0 360 EARC s
+n 0.9033 0.4977 0.0024 0.0024 0 360 EARC s
+n 0.9100 0.4977 0.0024 0.0024 0 360 EARC s
+n 0.9167 0.4977 0.0024 0.0024 0 360 EARC s
+n 0.9233 0.4976 0.0024 0.0024 0 360 EARC s
+n 0.9300 0.4976 0.0024 0.0024 0 360 EARC s
+n 0.9367 0.4976 0.0024 0.0024 0 360 EARC s
+n 0.9433 0.4975 0.0024 0.0024 0 360 EARC s
+n 0.9500 0.4975 0.0024 0.0024 0 360 EARC s
+n 0.9567 0.4975 0.0024 0.0024 0 360 EARC s
+n 0.9633 0.4975 0.0024 0.0024 0 360 EARC s
+n 0.9700 0.4975 0.0024 0.0024 0 360 EARC s
+n 0.9767 0.4975 0.0024 0.0024 0 360 EARC s
+n 0.9833 0.4975 0.0024 0.0024 0 360 EARC s
+n 0.9900 0.4974 0.0024 0.0024 0 360 EARC s
+n 0.9967 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0033 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0100 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0167 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0233 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0300 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0367 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0433 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0500 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0567 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0633 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0700 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0767 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0833 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0900 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0967 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1033 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1100 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1167 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1233 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1300 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1367 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1433 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1500 0.4974 0.0024 0.0024 0 360 EARC s
+[/DeviceRGB] SCS
+Color4 SC
+n 0.1500 0.1925 0.0024 0.0024 0 360 EARC s
+n 0.1567 0.2028 0.0024 0.0024 0 360 EARC s
+n 0.1633 0.2124 0.0024 0.0024 0 360 EARC s
+n 0.1700 0.2213 0.0024 0.0024 0 360 EARC s
+n 0.1767 0.2297 0.0024 0.0024 0 360 EARC s
+n 0.1833 0.2378 0.0024 0.0024 0 360 EARC s
+n 0.1900 0.2457 0.0024 0.0024 0 360 EARC s
+n 0.1967 0.2507 0.0024 0.0024 0 360 EARC s
+n 0.2033 0.2545 0.0024 0.0024 0 360 EARC s
+n 0.2100 0.2583 0.0024 0.0024 0 360 EARC s
+n 0.2167 0.2622 0.0024 0.0024 0 360 EARC s
+n 0.2233 0.2659 0.0024 0.0024 0 360 EARC s
+n 0.2300 0.2678 0.0024 0.0024 0 360 EARC s
+n 0.2367 0.2703 0.0024 0.0024 0 360 EARC s
+n 0.2433 0.2734 0.0024 0.0024 0 360 EARC s
+n 0.2500 0.2769 0.0024 0.0024 0 360 EARC s
+n 0.2567 0.2810 0.0024 0.0024 0 360 EARC s
+n 0.2633 0.2855 0.0024 0.0024 0 360 EARC s
+n 0.2700 0.2905 0.0024 0.0024 0 360 EARC s
+n 0.2767 0.2958 0.0024 0.0024 0 360 EARC s
+n 0.2833 0.3004 0.0024 0.0024 0 360 EARC s
+n 0.2900 0.3052 0.0024 0.0024 0 360 EARC s
+n 0.2967 0.3103 0.0024 0.0024 0 360 EARC s
+n 0.3033 0.3156 0.0024 0.0024 0 360 EARC s
+n 0.3100 0.3211 0.0024 0.0024 0 360 EARC s
+n 0.3167 0.3266 0.0024 0.0024 0 360 EARC s
+n 0.3233 0.3323 0.0024 0.0024 0 360 EARC s
+n 0.3300 0.3380 0.0024 0.0024 0 360 EARC s
+n 0.3367 0.3436 0.0024 0.0024 0 360 EARC s
+n 0.3433 0.3492 0.0024 0.0024 0 360 EARC s
+n 0.3500 0.3548 0.0024 0.0024 0 360 EARC s
+n 0.3567 0.3603 0.0024 0.0024 0 360 EARC s
+n 0.3633 0.3658 0.0024 0.0024 0 360 EARC s
+n 0.3700 0.3711 0.0024 0.0024 0 360 EARC s
+n 0.3767 0.3764 0.0024 0.0024 0 360 EARC s
+n 0.3833 0.3815 0.0024 0.0024 0 360 EARC s
+n 0.3900 0.3866 0.0024 0.0024 0 360 EARC s
+n 0.3967 0.3915 0.0024 0.0024 0 360 EARC s
+n 0.4033 0.3963 0.0024 0.0024 0 360 EARC s
+n 0.4100 0.4010 0.0024 0.0024 0 360 EARC s
+n 0.4167 0.4055 0.0024 0.0024 0 360 EARC s
+n 0.4233 0.4100 0.0024 0.0024 0 360 EARC s
+n 0.4300 0.4143 0.0024 0.0024 0 360 EARC s
+n 0.4367 0.4185 0.0024 0.0024 0 360 EARC s
+n 0.4433 0.4225 0.0024 0.0024 0 360 EARC s
+n 0.4500 0.4264 0.0024 0.0024 0 360 EARC s
+n 0.4567 0.4301 0.0024 0.0024 0 360 EARC s
+n 0.4633 0.4337 0.0024 0.0024 0 360 EARC s
+n 0.4700 0.4370 0.0024 0.0024 0 360 EARC s
+n 0.4767 0.4403 0.0024 0.0024 0 360 EARC s
+n 0.4833 0.4434 0.0024 0.0024 0 360 EARC s
+n 0.4900 0.4463 0.0024 0.0024 0 360 EARC s
+n 0.4967 0.4492 0.0024 0.0024 0 360 EARC s
+n 0.5033 0.4520 0.0024 0.0024 0 360 EARC s
+n 0.5100 0.4547 0.0024 0.0024 0 360 EARC s
+n 0.5167 0.4573 0.0024 0.0024 0 360 EARC s
+n 0.5233 0.4598 0.0024 0.0024 0 360 EARC s
+n 0.5300 0.4623 0.0024 0.0024 0 360 EARC s
+n 0.5367 0.4646 0.0024 0.0024 0 360 EARC s
+n 0.5433 0.4668 0.0024 0.0024 0 360 EARC s
+n 0.5500 0.4689 0.0024 0.0024 0 360 EARC s
+n 0.5567 0.4709 0.0024 0.0024 0 360 EARC s
+n 0.5633 0.4727 0.0024 0.0024 0 360 EARC s
+n 0.5700 0.4744 0.0024 0.0024 0 360 EARC s
+n 0.5767 0.4760 0.0024 0.0024 0 360 EARC s
+n 0.5833 0.4776 0.0024 0.0024 0 360 EARC s
+n 0.5900 0.4789 0.0024 0.0024 0 360 EARC s
+n 0.5967 0.4802 0.0024 0.0024 0 360 EARC s
+n 0.6033 0.4814 0.0024 0.0024 0 360 EARC s
+n 0.6100 0.4825 0.0024 0.0024 0 360 EARC s
+n 0.6167 0.4835 0.0024 0.0024 0 360 EARC s
+n 0.6233 0.4844 0.0024 0.0024 0 360 EARC s
+n 0.6300 0.4852 0.0024 0.0024 0 360 EARC s
+n 0.6367 0.4860 0.0024 0.0024 0 360 EARC s
+n 0.6433 0.4868 0.0024 0.0024 0 360 EARC s
+n 0.6500 0.4874 0.0024 0.0024 0 360 EARC s
+n 0.6567 0.4881 0.0024 0.0024 0 360 EARC s
+n 0.6633 0.4887 0.0024 0.0024 0 360 EARC s
+n 0.6700 0.4892 0.0024 0.0024 0 360 EARC s
+n 0.6767 0.4897 0.0024 0.0024 0 360 EARC s
+n 0.6833 0.4902 0.0024 0.0024 0 360 EARC s
+n 0.6900 0.4907 0.0024 0.0024 0 360 EARC s
+n 0.6967 0.4911 0.0024 0.0024 0 360 EARC s
+n 0.7033 0.4915 0.0024 0.0024 0 360 EARC s
+n 0.7100 0.4919 0.0024 0.0024 0 360 EARC s
+n 0.7167 0.4922 0.0024 0.0024 0 360 EARC s
+n 0.7233 0.4926 0.0024 0.0024 0 360 EARC s
+n 0.7300 0.4929 0.0024 0.0024 0 360 EARC s
+n 0.7367 0.4932 0.0024 0.0024 0 360 EARC s
+n 0.7433 0.4935 0.0024 0.0024 0 360 EARC s
+n 0.7500 0.4938 0.0024 0.0024 0 360 EARC s
+n 0.7567 0.4940 0.0024 0.0024 0 360 EARC s
+n 0.7633 0.4943 0.0024 0.0024 0 360 EARC s
+n 0.7700 0.4945 0.0024 0.0024 0 360 EARC s
+n 0.7767 0.4947 0.0024 0.0024 0 360 EARC s
+n 0.7833 0.4949 0.0024 0.0024 0 360 EARC s
+n 0.7900 0.4951 0.0024 0.0024 0 360 EARC s
+n 0.7967 0.4953 0.0024 0.0024 0 360 EARC s
+n 0.8033 0.4955 0.0024 0.0024 0 360 EARC s
+n 0.8100 0.4957 0.0024 0.0024 0 360 EARC s
+n 0.8167 0.4958 0.0024 0.0024 0 360 EARC s
+n 0.8233 0.4960 0.0024 0.0024 0 360 EARC s
+n 0.8300 0.4961 0.0024 0.0024 0 360 EARC s
+n 0.8367 0.4962 0.0024 0.0024 0 360 EARC s
+n 0.8433 0.4963 0.0024 0.0024 0 360 EARC s
+n 0.8500 0.4964 0.0024 0.0024 0 360 EARC s
+n 0.8567 0.4965 0.0024 0.0024 0 360 EARC s
+n 0.8633 0.4966 0.0024 0.0024 0 360 EARC s
+n 0.8700 0.4967 0.0024 0.0024 0 360 EARC s
+n 0.8767 0.4968 0.0024 0.0024 0 360 EARC s
+n 0.8833 0.4969 0.0024 0.0024 0 360 EARC s
+n 0.8900 0.4969 0.0024 0.0024 0 360 EARC s
+n 0.8967 0.4970 0.0024 0.0024 0 360 EARC s
+n 0.9033 0.4970 0.0024 0.0024 0 360 EARC s
+n 0.9100 0.4971 0.0024 0.0024 0 360 EARC s
+n 0.9167 0.4971 0.0024 0.0024 0 360 EARC s
+n 0.9233 0.4972 0.0024 0.0024 0 360 EARC s
+n 0.9300 0.4972 0.0024 0.0024 0 360 EARC s
+n 0.9367 0.4972 0.0024 0.0024 0 360 EARC s
+n 0.9433 0.4973 0.0024 0.0024 0 360 EARC s
+n 0.9500 0.4973 0.0024 0.0024 0 360 EARC s
+n 0.9567 0.4973 0.0024 0.0024 0 360 EARC s
+n 0.9633 0.4973 0.0024 0.0024 0 360 EARC s
+n 0.9700 0.4973 0.0024 0.0024 0 360 EARC s
+n 0.9767 0.4974 0.0024 0.0024 0 360 EARC s
+n 0.9833 0.4974 0.0024 0.0024 0 360 EARC s
+n 0.9900 0.4974 0.0024 0.0024 0 360 EARC s
+n 0.9967 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0033 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0100 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0167 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0233 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0300 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0367 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0433 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0500 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0567 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0633 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0700 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0767 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0833 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0900 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0967 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1033 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1100 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1167 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1233 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1300 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1367 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1433 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1500 0.4974 0.0024 0.0024 0 360 EARC s
+n 0.1500 0.8138 0.0024 0.0024 0 360 EARC s
+n 0.1567 0.8063 0.0024 0.0024 0 360 EARC s
+n 0.1633 0.7982 0.0024 0.0024 0 360 EARC s
+n 0.1700 0.7889 0.0024 0.0024 0 360 EARC s
+n 0.1767 0.7785 0.0024 0.0024 0 360 EARC s
+n 0.1833 0.7677 0.0024 0.0024 0 360 EARC s
+n 0.1900 0.7564 0.0024 0.0024 0 360 EARC s
+n 0.1967 0.7488 0.0024 0.0024 0 360 EARC s
+n 0.2033 0.7451 0.0024 0.0024 0 360 EARC s
+n 0.2100 0.7417 0.0024 0.0024 0 360 EARC s
+n 0.2167 0.7379 0.0024 0.0024 0 360 EARC s
+n 0.2233 0.7338 0.0024 0.0024 0 360 EARC s
+n 0.2300 0.7309 0.0024 0.0024 0 360 EARC s
+n 0.2367 0.7281 0.0024 0.0024 0 360 EARC s
+n 0.2433 0.7250 0.0024 0.0024 0 360 EARC s
+n 0.2500 0.7216 0.0024 0.0024 0 360 EARC s
+n 0.2567 0.7179 0.0024 0.0024 0 360 EARC s
+n 0.2633 0.7138 0.0024 0.0024 0 360 EARC s
+n 0.2700 0.7095 0.0024 0.0024 0 360 EARC s
+n 0.2767 0.7049 0.0024 0.0024 0 360 EARC s
+n 0.2833 0.6999 0.0024 0.0024 0 360 EARC s
+n 0.2900 0.6947 0.0024 0.0024 0 360 EARC s
+n 0.2967 0.6892 0.0024 0.0024 0 360 EARC s
+n 0.3033 0.6833 0.0024 0.0024 0 360 EARC s
+n 0.3100 0.6772 0.0024 0.0024 0 360 EARC s
+n 0.3167 0.6709 0.0024 0.0024 0 360 EARC s
+n 0.3233 0.6645 0.0024 0.0024 0 360 EARC s
+n 0.3300 0.6581 0.0024 0.0024 0 360 EARC s
+n 0.3367 0.6517 0.0024 0.0024 0 360 EARC s
+n 0.3433 0.6454 0.0024 0.0024 0 360 EARC s
+n 0.3500 0.6391 0.0024 0.0024 0 360 EARC s
+n 0.3567 0.6328 0.0024 0.0024 0 360 EARC s
+n 0.3633 0.6267 0.0024 0.0024 0 360 EARC s
+n 0.3700 0.6208 0.0024 0.0024 0 360 EARC s
+n 0.3767 0.6149 0.0024 0.0024 0 360 EARC s
+n 0.3833 0.6093 0.0024 0.0024 0 360 EARC s
+n 0.3900 0.6037 0.0024 0.0024 0 360 EARC s
+n 0.3967 0.5984 0.0024 0.0024 0 360 EARC s
+n 0.4033 0.5932 0.0024 0.0024 0 360 EARC s
+n 0.4100 0.5882 0.0024 0.0024 0 360 EARC s
+n 0.4167 0.5833 0.0024 0.0024 0 360 EARC s
+n 0.4233 0.5787 0.0024 0.0024 0 360 EARC s
+n 0.4300 0.5742 0.0024 0.0024 0 360 EARC s
+n 0.4367 0.5699 0.0024 0.0024 0 360 EARC s
+n 0.4433 0.5657 0.0024 0.0024 0 360 EARC s
+n 0.4500 0.5618 0.0024 0.0024 0 360 EARC s
+n 0.4567 0.5581 0.0024 0.0024 0 360 EARC s
+n 0.4633 0.5546 0.0024 0.0024 0 360 EARC s
+n 0.4700 0.5513 0.0024 0.0024 0 360 EARC s
+n 0.4767 0.5481 0.0024 0.0024 0 360 EARC s
+n 0.4833 0.5452 0.0024 0.0024 0 360 EARC s
+n 0.4900 0.5423 0.0024 0.0024 0 360 EARC s
+n 0.4967 0.5396 0.0024 0.0024 0 360 EARC s
+n 0.5033 0.5370 0.0024 0.0024 0 360 EARC s
+n 0.5100 0.5345 0.0024 0.0024 0 360 EARC s
+n 0.5167 0.5321 0.0024 0.0024 0 360 EARC s
+n 0.5233 0.5298 0.0024 0.0024 0 360 EARC s
+n 0.5300 0.5277 0.0024 0.0024 0 360 EARC s
+n 0.5367 0.5256 0.0024 0.0024 0 360 EARC s
+n 0.5433 0.5237 0.0024 0.0024 0 360 EARC s
+n 0.5500 0.5219 0.0024 0.0024 0 360 EARC s
+n 0.5567 0.5202 0.0024 0.0024 0 360 EARC s
+n 0.5633 0.5186 0.0024 0.0024 0 360 EARC s
+n 0.5700 0.5171 0.0024 0.0024 0 360 EARC s
+n 0.5767 0.5158 0.0024 0.0024 0 360 EARC s
+n 0.5833 0.5146 0.0024 0.0024 0 360 EARC s
+n 0.5900 0.5134 0.0024 0.0024 0 360 EARC s
+n 0.5967 0.5124 0.0024 0.0024 0 360 EARC s
+n 0.6033 0.5114 0.0024 0.0024 0 360 EARC s
+n 0.6100 0.5106 0.0024 0.0024 0 360 EARC s
+n 0.6167 0.5098 0.0024 0.0024 0 360 EARC s
+n 0.6233 0.5090 0.0024 0.0024 0 360 EARC s
+n 0.6300 0.5084 0.0024 0.0024 0 360 EARC s
+n 0.6367 0.5077 0.0024 0.0024 0 360 EARC s
+n 0.6433 0.5072 0.0024 0.0024 0 360 EARC s
+n 0.6500 0.5066 0.0024 0.0024 0 360 EARC s
+n 0.6567 0.5061 0.0024 0.0024 0 360 EARC s
+n 0.6633 0.5056 0.0024 0.0024 0 360 EARC s
+n 0.6700 0.5052 0.0024 0.0024 0 360 EARC s
+n 0.6767 0.5047 0.0024 0.0024 0 360 EARC s
+n 0.6833 0.5043 0.0024 0.0024 0 360 EARC s
+n 0.6900 0.5039 0.0024 0.0024 0 360 EARC s
+n 0.6967 0.5035 0.0024 0.0024 0 360 EARC s
+n 0.7033 0.5032 0.0024 0.0024 0 360 EARC s
+n 0.7100 0.5028 0.0024 0.0024 0 360 EARC s
+n 0.7167 0.5025 0.0024 0.0024 0 360 EARC s
+n 0.7233 0.5022 0.0024 0.0024 0 360 EARC s
+n 0.7300 0.5019 0.0024 0.0024 0 360 EARC s
+n 0.7367 0.5016 0.0024 0.0024 0 360 EARC s
+n 0.7433 0.5013 0.0024 0.0024 0 360 EARC s
+n 0.7500 0.5010 0.0024 0.0024 0 360 EARC s
+n 0.7567 0.5008 0.0024 0.0024 0 360 EARC s
+n 0.7633 0.5005 0.0024 0.0024 0 360 EARC s
+n 0.7700 0.5003 0.0024 0.0024 0 360 EARC s
+n 0.7767 0.5001 0.0024 0.0024 0 360 EARC s
+n 0.7833 0.4999 0.0024 0.0024 0 360 EARC s
+n 0.7900 0.4997 0.0024 0.0024 0 360 EARC s
+n 0.7967 0.4995 0.0024 0.0024 0 360 EARC s
+n 0.8033 0.4993 0.0024 0.0024 0 360 EARC s
+n 0.8100 0.4992 0.0024 0.0024 0 360 EARC s
+n 0.8167 0.4990 0.0024 0.0024 0 360 EARC s
+n 0.8233 0.4989 0.0024 0.0024 0 360 EARC s
+n 0.8300 0.4987 0.0024 0.0024 0 360 EARC s
+n 0.8367 0.4986 0.0024 0.0024 0 360 EARC s
+n 0.8433 0.4985 0.0024 0.0024 0 360 EARC s
+n 0.8500 0.4984 0.0024 0.0024 0 360 EARC s
+n 0.8567 0.4983 0.0024 0.0024 0 360 EARC s
+n 0.8633 0.4982 0.0024 0.0024 0 360 EARC s
+n 0.8700 0.4981 0.0024 0.0024 0 360 EARC s
+n 0.8767 0.4980 0.0024 0.0024 0 360 EARC s
+n 0.8833 0.4980 0.0024 0.0024 0 360 EARC s
+n 0.8900 0.4979 0.0024 0.0024 0 360 EARC s
+n 0.8967 0.4978 0.0024 0.0024 0 360 EARC s
+n 0.9033 0.4978 0.0024 0.0024 0 360 EARC s
+n 0.9100 0.4977 0.0024 0.0024 0 360 EARC s
+n 0.9167 0.4977 0.0024 0.0024 0 360 EARC s
+n 0.9233 0.4977 0.0024 0.0024 0 360 EARC s
+n 0.9300 0.4976 0.0024 0.0024 0 360 EARC s
+n 0.9367 0.4976 0.0024 0.0024 0 360 EARC s
+n 0.9433 0.4976 0.0024 0.0024 0 360 EARC s
+n 0.9500 0.4975 0.0024 0.0024 0 360 EARC s
+n 0.9567 0.4975 0.0024 0.0024 0 360 EARC s
+n 0.9633 0.4975 0.0024 0.0024 0 360 EARC s
+n 0.9700 0.4975 0.0024 0.0024 0 360 EARC s
+n 0.9767 0.4975 0.0024 0.0024 0 360 EARC s
+n 0.9833 0.4975 0.0024 0.0024 0 360 EARC s
+n 0.9900 0.4975 0.0024 0.0024 0 360 EARC s
+n 0.9967 0.4975 0.0024 0.0024 0 360 EARC s
+n 1.0033 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0100 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0167 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0233 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0300 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0367 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0433 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0500 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0567 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0633 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0700 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0767 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0833 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0900 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0967 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1033 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1100 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1167 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1233 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1300 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1367 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1433 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1500 0.4974 0.0024 0.0024 0 360 EARC s
+[/DeviceRGB] SCS
+Color3 SC
+n 0.1500 0.1925 0.0024 0.0024 0 360 EARC s
+n 0.1567 0.2048 0.0024 0.0024 0 360 EARC s
+n 0.1633 0.2034 0.0024 0.0024 0 360 EARC s
+n 0.1700 0.2006 0.0024 0.0024 0 360 EARC s
+n 0.1767 0.1988 0.0024 0.0024 0 360 EARC s
+n 0.1833 0.1977 0.0024 0.0024 0 360 EARC s
+n 0.1900 0.1974 0.0024 0.0024 0 360 EARC s
+n 0.1967 0.1976 0.0024 0.0024 0 360 EARC s
+n 0.2033 0.1987 0.0024 0.0024 0 360 EARC s
+n 0.2100 0.2001 0.0024 0.0024 0 360 EARC s
+n 0.2167 0.2023 0.0024 0.0024 0 360 EARC s
+n 0.2233 0.2050 0.0024 0.0024 0 360 EARC s
+n 0.2300 0.2086 0.0024 0.0024 0 360 EARC s
+n 0.2367 0.2127 0.0024 0.0024 0 360 EARC s
+n 0.2433 0.2176 0.0024 0.0024 0 360 EARC s
+n 0.2500 0.2226 0.0024 0.0024 0 360 EARC s
+n 0.2567 0.2272 0.0024 0.0024 0 360 EARC s
+n 0.2633 0.2320 0.0024 0.0024 0 360 EARC s
+n 0.2700 0.2372 0.0024 0.0024 0 360 EARC s
+n 0.2767 0.2427 0.0024 0.0024 0 360 EARC s
+n 0.2833 0.2484 0.0024 0.0024 0 360 EARC s
+n 0.2900 0.2543 0.0024 0.0024 0 360 EARC s
+n 0.2967 0.2604 0.0024 0.0024 0 360 EARC s
+n 0.3033 0.2670 0.0024 0.0024 0 360 EARC s
+n 0.3100 0.2740 0.0024 0.0024 0 360 EARC s
+n 0.3167 0.2812 0.0024 0.0024 0 360 EARC s
+n 0.3233 0.2888 0.0024 0.0024 0 360 EARC s
+n 0.3300 0.2967 0.0024 0.0024 0 360 EARC s
+n 0.3367 0.3047 0.0024 0.0024 0 360 EARC s
+n 0.3433 0.3128 0.0024 0.0024 0 360 EARC s
+n 0.3500 0.3210 0.0024 0.0024 0 360 EARC s
+n 0.3567 0.3291 0.0024 0.0024 0 360 EARC s
+n 0.3633 0.3373 0.0024 0.0024 0 360 EARC s
+n 0.3700 0.3452 0.0024 0.0024 0 360 EARC s
+n 0.3767 0.3530 0.0024 0.0024 0 360 EARC s
+n 0.3833 0.3607 0.0024 0.0024 0 360 EARC s
+n 0.3900 0.3682 0.0024 0.0024 0 360 EARC s
+n 0.3967 0.3754 0.0024 0.0024 0 360 EARC s
+n 0.4033 0.3823 0.0024 0.0024 0 360 EARC s
+n 0.4100 0.3888 0.0024 0.0024 0 360 EARC s
+n 0.4167 0.3948 0.0024 0.0024 0 360 EARC s
+n 0.4233 0.4007 0.0024 0.0024 0 360 EARC s
+n 0.4300 0.4061 0.0024 0.0024 0 360 EARC s
+n 0.4367 0.4111 0.0024 0.0024 0 360 EARC s
+n 0.4433 0.4159 0.0024 0.0024 0 360 EARC s
+n 0.4500 0.4204 0.0024 0.0024 0 360 EARC s
+n 0.4567 0.4246 0.0024 0.0024 0 360 EARC s
+n 0.4633 0.4285 0.0024 0.0024 0 360 EARC s
+n 0.4700 0.4323 0.0024 0.0024 0 360 EARC s
+n 0.4767 0.4359 0.0024 0.0024 0 360 EARC s
+n 0.4833 0.4392 0.0024 0.0024 0 360 EARC s
+n 0.4900 0.4425 0.0024 0.0024 0 360 EARC s
+n 0.4967 0.4456 0.0024 0.0024 0 360 EARC s
+n 0.5033 0.4485 0.0024 0.0024 0 360 EARC s
+n 0.5100 0.4514 0.0024 0.0024 0 360 EARC s
+n 0.5167 0.4541 0.0024 0.0024 0 360 EARC s
+n 0.5233 0.4567 0.0024 0.0024 0 360 EARC s
+n 0.5300 0.4592 0.0024 0.0024 0 360 EARC s
+n 0.5367 0.4616 0.0024 0.0024 0 360 EARC s
+n 0.5433 0.4639 0.0024 0.0024 0 360 EARC s
+n 0.5500 0.4660 0.0024 0.0024 0 360 EARC s
+n 0.5567 0.4680 0.0024 0.0024 0 360 EARC s
+n 0.5633 0.4699 0.0024 0.0024 0 360 EARC s
+n 0.5700 0.4716 0.0024 0.0024 0 360 EARC s
+n 0.5767 0.4732 0.0024 0.0024 0 360 EARC s
+n 0.5833 0.4747 0.0024 0.0024 0 360 EARC s
+n 0.5900 0.4761 0.0024 0.0024 0 360 EARC s
+n 0.5967 0.4774 0.0024 0.0024 0 360 EARC s
+n 0.6033 0.4785 0.0024 0.0024 0 360 EARC s
+n 0.6100 0.4795 0.0024 0.0024 0 360 EARC s
+n 0.6167 0.4805 0.0024 0.0024 0 360 EARC s
+n 0.6233 0.4814 0.0024 0.0024 0 360 EARC s
+n 0.6300 0.4822 0.0024 0.0024 0 360 EARC s
+n 0.6367 0.4829 0.0024 0.0024 0 360 EARC s
+n 0.6433 0.4836 0.0024 0.0024 0 360 EARC s
+n 0.6500 0.4842 0.0024 0.0024 0 360 EARC s
+n 0.6567 0.4847 0.0024 0.0024 0 360 EARC s
+n 0.6633 0.4852 0.0024 0.0024 0 360 EARC s
+n 0.6700 0.4857 0.0024 0.0024 0 360 EARC s
+n 0.6767 0.4862 0.0024 0.0024 0 360 EARC s
+n 0.6833 0.4867 0.0024 0.0024 0 360 EARC s
+n 0.6900 0.4872 0.0024 0.0024 0 360 EARC s
+n 0.6967 0.4877 0.0024 0.0024 0 360 EARC s
+n 0.7033 0.4882 0.0024 0.0024 0 360 EARC s
+n 0.7100 0.4887 0.0024 0.0024 0 360 EARC s
+n 0.7167 0.4891 0.0024 0.0024 0 360 EARC s
+n 0.7233 0.4896 0.0024 0.0024 0 360 EARC s
+n 0.7300 0.4901 0.0024 0.0024 0 360 EARC s
+n 0.7367 0.4906 0.0024 0.0024 0 360 EARC s
+n 0.7433 0.4910 0.0024 0.0024 0 360 EARC s
+n 0.7500 0.4914 0.0024 0.0024 0 360 EARC s
+n 0.7567 0.4919 0.0024 0.0024 0 360 EARC s
+n 0.7633 0.4923 0.0024 0.0024 0 360 EARC s
+n 0.7700 0.4926 0.0024 0.0024 0 360 EARC s
+n 0.7767 0.4930 0.0024 0.0024 0 360 EARC s
+n 0.7833 0.4934 0.0024 0.0024 0 360 EARC s
+n 0.7900 0.4937 0.0024 0.0024 0 360 EARC s
+n 0.7967 0.4940 0.0024 0.0024 0 360 EARC s
+n 0.8033 0.4943 0.0024 0.0024 0 360 EARC s
+n 0.8100 0.4946 0.0024 0.0024 0 360 EARC s
+n 0.8167 0.4948 0.0024 0.0024 0 360 EARC s
+n 0.8233 0.4951 0.0024 0.0024 0 360 EARC s
+n 0.8300 0.4953 0.0024 0.0024 0 360 EARC s
+n 0.8367 0.4955 0.0024 0.0024 0 360 EARC s
+n 0.8433 0.4957 0.0024 0.0024 0 360 EARC s
+n 0.8500 0.4959 0.0024 0.0024 0 360 EARC s
+n 0.8567 0.4961 0.0024 0.0024 0 360 EARC s
+n 0.8633 0.4962 0.0024 0.0024 0 360 EARC s
+n 0.8700 0.4963 0.0024 0.0024 0 360 EARC s
+n 0.8767 0.4965 0.0024 0.0024 0 360 EARC s
+n 0.8833 0.4966 0.0024 0.0024 0 360 EARC s
+n 0.8900 0.4967 0.0024 0.0024 0 360 EARC s
+n 0.8967 0.4968 0.0024 0.0024 0 360 EARC s
+n 0.9033 0.4969 0.0024 0.0024 0 360 EARC s
+n 0.9100 0.4969 0.0024 0.0024 0 360 EARC s
+n 0.9167 0.4970 0.0024 0.0024 0 360 EARC s
+n 0.9233 0.4971 0.0024 0.0024 0 360 EARC s
+n 0.9300 0.4971 0.0024 0.0024 0 360 EARC s
+n 0.9367 0.4972 0.0024 0.0024 0 360 EARC s
+n 0.9433 0.4972 0.0024 0.0024 0 360 EARC s
+n 0.9500 0.4972 0.0024 0.0024 0 360 EARC s
+n 0.9567 0.4973 0.0024 0.0024 0 360 EARC s
+n 0.9633 0.4973 0.0024 0.0024 0 360 EARC s
+n 0.9700 0.4973 0.0024 0.0024 0 360 EARC s
+n 0.9767 0.4973 0.0024 0.0024 0 360 EARC s
+n 0.9833 0.4973 0.0024 0.0024 0 360 EARC s
+n 0.9900 0.4974 0.0024 0.0024 0 360 EARC s
+n 0.9967 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0033 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0100 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0167 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0233 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0300 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0367 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0433 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0500 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0567 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0633 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0700 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0767 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0833 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0900 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0967 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1033 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1100 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1167 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1233 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1300 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1367 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1433 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1500 0.4974 0.0024 0.0024 0 360 EARC s
+n 0.1500 0.8138 0.0024 0.0024 0 360 EARC s
+n 0.1567 0.8059 0.0024 0.0024 0 360 EARC s
+n 0.1633 0.7996 0.0024 0.0024 0 360 EARC s
+n 0.1700 0.8039 0.0024 0.0024 0 360 EARC s
+n 0.1767 0.8067 0.0024 0.0024 0 360 EARC s
+n 0.1833 0.8075 0.0024 0.0024 0 360 EARC s
+n 0.1900 0.8069 0.0024 0.0024 0 360 EARC s
+n 0.1967 0.8050 0.0024 0.0024 0 360 EARC s
+n 0.2033 0.8039 0.0024 0.0024 0 360 EARC s
+n 0.2100 0.8032 0.0024 0.0024 0 360 EARC s
+n 0.2167 0.8013 0.0024 0.0024 0 360 EARC s
+n 0.2233 0.7982 0.0024 0.0024 0 360 EARC s
+n 0.2300 0.7941 0.0024 0.0024 0 360 EARC s
+n 0.2367 0.7892 0.0024 0.0024 0 360 EARC s
+n 0.2433 0.7836 0.0024 0.0024 0 360 EARC s
+n 0.2500 0.7772 0.0024 0.0024 0 360 EARC s
+n 0.2567 0.7701 0.0024 0.0024 0 360 EARC s
+n 0.2633 0.7646 0.0024 0.0024 0 360 EARC s
+n 0.2700 0.7593 0.0024 0.0024 0 360 EARC s
+n 0.2767 0.7538 0.0024 0.0024 0 360 EARC s
+n 0.2833 0.7480 0.0024 0.0024 0 360 EARC s
+n 0.2900 0.7418 0.0024 0.0024 0 360 EARC s
+n 0.2967 0.7353 0.0024 0.0024 0 360 EARC s
+n 0.3033 0.7284 0.0024 0.0024 0 360 EARC s
+n 0.3100 0.7212 0.0024 0.0024 0 360 EARC s
+n 0.3167 0.7138 0.0024 0.0024 0 360 EARC s
+n 0.3233 0.7063 0.0024 0.0024 0 360 EARC s
+n 0.3300 0.6988 0.0024 0.0024 0 360 EARC s
+n 0.3367 0.6918 0.0024 0.0024 0 360 EARC s
+n 0.3433 0.6861 0.0024 0.0024 0 360 EARC s
+n 0.3500 0.6804 0.0024 0.0024 0 360 EARC s
+n 0.3567 0.6745 0.0024 0.0024 0 360 EARC s
+n 0.3633 0.6686 0.0024 0.0024 0 360 EARC s
+n 0.3700 0.6626 0.0024 0.0024 0 360 EARC s
+n 0.3767 0.6566 0.0024 0.0024 0 360 EARC s
+n 0.3833 0.6505 0.0024 0.0024 0 360 EARC s
+n 0.3900 0.6445 0.0024 0.0024 0 360 EARC s
+n 0.3967 0.6385 0.0024 0.0024 0 360 EARC s
+n 0.4033 0.6326 0.0024 0.0024 0 360 EARC s
+n 0.4100 0.6266 0.0024 0.0024 0 360 EARC s
+n 0.4167 0.6208 0.0024 0.0024 0 360 EARC s
+n 0.4233 0.6150 0.0024 0.0024 0 360 EARC s
+n 0.4300 0.6094 0.0024 0.0024 0 360 EARC s
+n 0.4367 0.6039 0.0024 0.0024 0 360 EARC s
+n 0.4433 0.5986 0.0024 0.0024 0 360 EARC s
+n 0.4500 0.5934 0.0024 0.0024 0 360 EARC s
+n 0.4567 0.5886 0.0024 0.0024 0 360 EARC s
+n 0.4633 0.5839 0.0024 0.0024 0 360 EARC s
+n 0.4700 0.5796 0.0024 0.0024 0 360 EARC s
+n 0.4767 0.5755 0.0024 0.0024 0 360 EARC s
+n 0.4833 0.5716 0.0024 0.0024 0 360 EARC s
+n 0.4900 0.5679 0.0024 0.0024 0 360 EARC s
+n 0.4967 0.5643 0.0024 0.0024 0 360 EARC s
+n 0.5033 0.5610 0.0024 0.0024 0 360 EARC s
+n 0.5100 0.5578 0.0024 0.0024 0 360 EARC s
+n 0.5167 0.5547 0.0024 0.0024 0 360 EARC s
+n 0.5233 0.5517 0.0024 0.0024 0 360 EARC s
+n 0.5300 0.5488 0.0024 0.0024 0 360 EARC s
+n 0.5367 0.5461 0.0024 0.0024 0 360 EARC s
+n 0.5433 0.5434 0.0024 0.0024 0 360 EARC s
+n 0.5500 0.5409 0.0024 0.0024 0 360 EARC s
+n 0.5567 0.5386 0.0024 0.0024 0 360 EARC s
+n 0.5633 0.5363 0.0024 0.0024 0 360 EARC s
+n 0.5700 0.5342 0.0024 0.0024 0 360 EARC s
+n 0.5767 0.5322 0.0024 0.0024 0 360 EARC s
+n 0.5833 0.5303 0.0024 0.0024 0 360 EARC s
+n 0.5900 0.5285 0.0024 0.0024 0 360 EARC s
+n 0.5967 0.5267 0.0024 0.0024 0 360 EARC s
+n 0.6033 0.5251 0.0024 0.0024 0 360 EARC s
+n 0.6100 0.5235 0.0024 0.0024 0 360 EARC s
+n 0.6167 0.5220 0.0024 0.0024 0 360 EARC s
+n 0.6233 0.5206 0.0024 0.0024 0 360 EARC s
+n 0.6300 0.5193 0.0024 0.0024 0 360 EARC s
+n 0.6367 0.5180 0.0024 0.0024 0 360 EARC s
+n 0.6433 0.5168 0.0024 0.0024 0 360 EARC s
+n 0.6500 0.5156 0.0024 0.0024 0 360 EARC s
+n 0.6567 0.5145 0.0024 0.0024 0 360 EARC s
+n 0.6633 0.5134 0.0024 0.0024 0 360 EARC s
+n 0.6700 0.5124 0.0024 0.0024 0 360 EARC s
+n 0.6767 0.5115 0.0024 0.0024 0 360 EARC s
+n 0.6833 0.5105 0.0024 0.0024 0 360 EARC s
+n 0.6900 0.5097 0.0024 0.0024 0 360 EARC s
+n 0.6967 0.5089 0.0024 0.0024 0 360 EARC s
+n 0.7033 0.5081 0.0024 0.0024 0 360 EARC s
+n 0.7100 0.5073 0.0024 0.0024 0 360 EARC s
+n 0.7167 0.5066 0.0024 0.0024 0 360 EARC s
+n 0.7233 0.5060 0.0024 0.0024 0 360 EARC s
+n 0.7300 0.5053 0.0024 0.0024 0 360 EARC s
+n 0.7367 0.5047 0.0024 0.0024 0 360 EARC s
+n 0.7433 0.5042 0.0024 0.0024 0 360 EARC s
+n 0.7500 0.5036 0.0024 0.0024 0 360 EARC s
+n 0.7567 0.5031 0.0024 0.0024 0 360 EARC s
+n 0.7633 0.5027 0.0024 0.0024 0 360 EARC s
+n 0.7700 0.5022 0.0024 0.0024 0 360 EARC s
+n 0.7767 0.5018 0.0024 0.0024 0 360 EARC s
+n 0.7833 0.5015 0.0024 0.0024 0 360 EARC s
+n 0.7900 0.5011 0.0024 0.0024 0 360 EARC s
+n 0.7967 0.5008 0.0024 0.0024 0 360 EARC s
+n 0.8033 0.5005 0.0024 0.0024 0 360 EARC s
+n 0.8100 0.5002 0.0024 0.0024 0 360 EARC s
+n 0.8167 0.4999 0.0024 0.0024 0 360 EARC s
+n 0.8233 0.4997 0.0024 0.0024 0 360 EARC s
+n 0.8300 0.4995 0.0024 0.0024 0 360 EARC s
+n 0.8367 0.4992 0.0024 0.0024 0 360 EARC s
+n 0.8433 0.4991 0.0024 0.0024 0 360 EARC s
+n 0.8500 0.4989 0.0024 0.0024 0 360 EARC s
+n 0.8567 0.4987 0.0024 0.0024 0 360 EARC s
+n 0.8633 0.4986 0.0024 0.0024 0 360 EARC s
+n 0.8700 0.4984 0.0024 0.0024 0 360 EARC s
+n 0.8767 0.4983 0.0024 0.0024 0 360 EARC s
+n 0.8833 0.4982 0.0024 0.0024 0 360 EARC s
+n 0.8900 0.4981 0.0024 0.0024 0 360 EARC s
+n 0.8967 0.4980 0.0024 0.0024 0 360 EARC s
+n 0.9033 0.4979 0.0024 0.0024 0 360 EARC s
+n 0.9100 0.4979 0.0024 0.0024 0 360 EARC s
+n 0.9167 0.4978 0.0024 0.0024 0 360 EARC s
+n 0.9233 0.4978 0.0024 0.0024 0 360 EARC s
+n 0.9300 0.4977 0.0024 0.0024 0 360 EARC s
+n 0.9367 0.4977 0.0024 0.0024 0 360 EARC s
+n 0.9433 0.4976 0.0024 0.0024 0 360 EARC s
+n 0.9500 0.4976 0.0024 0.0024 0 360 EARC s
+n 0.9567 0.4976 0.0024 0.0024 0 360 EARC s
+n 0.9633 0.4975 0.0024 0.0024 0 360 EARC s
+n 0.9700 0.4975 0.0024 0.0024 0 360 EARC s
+n 0.9767 0.4975 0.0024 0.0024 0 360 EARC s
+n 0.9833 0.4975 0.0024 0.0024 0 360 EARC s
+n 0.9900 0.4975 0.0024 0.0024 0 360 EARC s
+n 0.9967 0.4975 0.0024 0.0024 0 360 EARC s
+n 1.0033 0.4975 0.0024 0.0024 0 360 EARC s
+n 1.0100 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0167 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0233 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0300 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0367 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0433 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0500 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0567 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0633 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0700 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0767 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0833 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0900 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.0967 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1033 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1100 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1167 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1233 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1300 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1367 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1433 0.4974 0.0024 0.0024 0 360 EARC s
+n 1.1500 0.4974 0.0024 0.0024 0 360 EARC s
+[/DeviceRGB] SCS
+Color1 SC
+n
+0.1500 0.1500 m
+1.1500 0.1500 l
+s
+n
+0.1500 0.8500 m
+1.1500 0.8500 l
+s
+n
+0.3167 0.1500 m
+0.3167 0.1600 l
+s
+n
+0.3167 0.8500 m
+0.3167 0.8400 l
+s
+n
+0.6500 0.1500 m
+0.6500 0.1600 l
+s
+n
+0.6500 0.8500 m
+0.6500 0.8400 l
+s
+n
+0.9833 0.1500 m
+0.9833 0.1600 l
+s
+n
+0.9833 0.8500 m
+0.9833 0.8400 l
+s
+n
+0.1500 0.1500 m
+0.1500 0.1700 l
+s
+n
+0.1500 0.8500 m
+0.1500 0.8300 l
+s
+n
+0.4833 0.1500 m
+0.4833 0.1700 l
+s
+n
+0.4833 0.8500 m
+0.4833 0.8300 l
+s
+n
+0.8167 0.1500 m
+0.8167 0.1700 l
+s
+n
+0.8167 0.8500 m
+0.8167 0.8300 l
+s
+n
+1.1500 0.1500 m
+1.1500 0.1700 l
+s
+n
+1.1500 0.8500 m
+1.1500 0.8300 l
+s
+/Times-Roman findfont
+dup length dict begin
+ {1 index /FID ne {def} {pop pop} ifelse} forall
+ /Encoding DefEncoding def
+ currentdict
+end
+/Font0 exch definefont pop
+/Font0 FFSF
+0.1433 0.1210 m
+GS
+[0.0280 0.0000 0.0000 0.0280 0 0] CC
+(0) show
+GR
+/Font0 FFSF
+0.4628 0.1206 m
+GS
+[0.0280 0.0000 0.0000 0.0280 0 0] CC
+(500) show
+GR
+/Font0 FFSF
+0.7890 0.1210 m
+GS
+[0.0280 0.0000 0.0000 0.0280 0 0] CC
+(1000) show
+GR
+/Font0 FFSF
+1.1224 0.1206 m
+GS
+[0.0280 0.0000 0.0000 0.0280 0 0] CC
+(1500) show
+GR
+/Font0 FFSF
+0.5978 0.0909 m
+GS
+[0.0280 0.0000 0.0000 0.0280 0 0] CC
+(Iterations) show
+GR
+n
+0.1500 0.1500 m
+0.1500 0.8500 l
+s
+n
+1.1500 0.1500 m
+1.1500 0.8500 l
+s
+n
+0.1500 0.1500 m
+0.1600 0.1500 l
+s
+n
+1.1500 0.1500 m
+1.1400 0.1500 l
+s
+n
+0.1500 0.3833 m
+0.1600 0.3833 l
+s
+n
+1.1500 0.3833 m
+1.1400 0.3833 l
+s
+n
+0.1500 0.6167 m
+0.1600 0.6167 l
+s
+n
+1.1500 0.6167 m
+1.1400 0.6167 l
+s
+n
+0.1500 0.8500 m
+0.1600 0.8500 l
+s
+n
+1.1500 0.8500 m
+1.1400 0.8500 l
+s
+n
+0.1500 0.2667 m
+0.1700 0.2667 l
+s
+n
+1.1500 0.2667 m
+1.1300 0.2667 l
+s
+n
+0.1500 0.5000 m
+0.1700 0.5000 l
+s
+n
+1.1500 0.5000 m
+1.1300 0.5000 l
+s
+n
+0.1500 0.7333 m
+0.1700 0.7333 l
+s
+n
+1.1500 0.7333 m
+1.1300 0.7333 l
+s
+/Font0 FFSF
+0.1267 0.2574 m
+GS
+[0.0280 0.0000 0.0000 0.0280 0 0] CC
+(0) show
+GR
+/Font0 FFSF
+0.1069 0.4923 m
+GS
+[0.0280 0.0000 0.0000 0.0280 0 0] CC
+(0,5) show
+GR
+/Font0 FFSF
+0.1291 0.7239 m
+GS
+[0.0280 0.0000 0.0000 0.0280 0 0] CC
+(1) show
+GR
+/Font0 FFSF
+0.0909 0.3626 m
+GS
+[0.0000 0.0280 -0.0280 0.0000 0 0] CC
+(Scalaire \(min, avg, max\)) show
+GR
+n
+0.1500 0.1500 m
+0.1500 0.8500 l
+1.1500 0.8500 l
+1.1500 0.1500 l
+0.1500 0.1500 l
+c
+s
+n
+0.8500 0.8000 m
+0.8500 0.6604 l
+1.1494 0.6604 l
+1.1494 0.8000 l
+c
+[/DeviceRGB] SCS
+Color0 SC
+fill
+[/DeviceRGB] SCS
+Color1 SC
+n
+0.8500 0.8000 m
+0.8500 0.6604 l
+1.1494 0.6604 l
+1.1494 0.8000 l
+0.8500 0.8000 l
+c
+s
+/Font0 FFSF
+0.9208 0.7706 m
+GS
+[0.0280 0.0000 0.0000 0.0280 0 0] CC
+(isotrope \(reference\)) show
+GR
+[/DeviceRGB] SCS
+Color2 SC
+n
+0.8608 0.7773 m
+0.9008 0.7773 l
+s
+/Font0 FFSF
+[/DeviceRGB] SCS
+Color1 SC
+0.9208 0.7352 m
+GS
+[0.0280 0.0000 0.0000 0.0280 0 0] CC
+(isotrope) show
+GR
+[/DeviceRGB] SCS
+Color2 SC
+n 0.8808 0.7419 0.0024 0.0024 0 360 EARC s
+/Font0 FFSF
+[/DeviceRGB] SCS
+Color1 SC
+0.9208 0.7063 m
+GS
+[0.0280 0.0000 0.0000 0.0280 0 0] CC
+(none) show
+GR
+[/DeviceRGB] SCS
+Color4 SC
+n 0.8808 0.7125 0.0024 0.0024 0 360 EARC s
+/Font0 FFSF
+[/DeviceRGB] SCS
+Color1 SC
+0.9208 0.6765 m
+GS
+[0.0280 0.0000 0.0000 0.0280 0 0] CC
+(anisotrope) show
+GR
+[/DeviceRGB] SCS
+Color3 SC
+n 0.8808 0.6832 0.0024 0.0024 0 360 EARC s
+/Font0 FFSF
+[/DeviceRGB] SCS
+Color1 SC
+0.2115 0.9193 m
+GS
+[0.0420 0.0000 0.0000 0.0420 0 0] CC
+(Evolution du scalaire pour 3 methodes de dealiasing) show
+GR
+/Font0 FFSF
+0.4947 0.8740 m
+GS
+[0.0280 0.0000 0.0000 0.0280 0 0] CC
+(Schmidt=0.1, 128x128x128) show
+GR
+%%Trailer
+%%BoundingBox: 41 52 702 565
+%%DocumentNeededResources: font Times-Roman
+%%EOF
diff --git a/HySoP/src/Unstable/LEGI/doc/doxygen/images/dealias.png b/HySoP/src/Unstable/LEGI/doc/doxygen/images/dealias.png
new file mode 100644
index 0000000000000000000000000000000000000000..ccc247514effb77ac0c9d2cd0e7bb543c0c0c81f
Binary files /dev/null and b/HySoP/src/Unstable/LEGI/doc/doxygen/images/dealias.png differ
diff --git a/HySoP/src/Unstable/LEGI/doc/doxygen/images/parallel.eps b/HySoP/src/Unstable/LEGI/doc/doxygen/images/parallel.eps
new file mode 100644
index 0000000000000000000000000000000000000000..39b0e196f28fd8d73fb86ac5eafd8bdc83ef2da2
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/doc/doxygen/images/parallel.eps
@@ -0,0 +1,209 @@
+%!PS-Adobe-2.0 EPSF-2.0
+%%Title: parallel.fig
+%%Creator: fig2dev Version 3.2 Patchlevel 5
+%%CreationDate: Tue Jul 19 12:08:34 2011
+%%For: begou@thor (Patrick Begou)
+%%BoundingBox: 0 0 396 407
+%Magnification: 1.0000
+%%EndComments
+%%BeginProlog
+/$F2psDict 200 dict def
+$F2psDict begin
+$F2psDict /mtrx matrix put
+/col-1 {0 setgray} bind def
+/col0 {0.000 0.000 0.000 srgb} bind def
+/col1 {0.000 0.000 1.000 srgb} bind def
+/col2 {0.000 1.000 0.000 srgb} bind def
+/col3 {0.000 1.000 1.000 srgb} bind def
+/col4 {1.000 0.000 0.000 srgb} bind def
+/col5 {1.000 0.000 1.000 srgb} bind def
+/col6 {1.000 1.000 0.000 srgb} bind def
+/col7 {1.000 1.000 1.000 srgb} bind def
+/col8 {0.000 0.000 0.560 srgb} bind def
+/col9 {0.000 0.000 0.690 srgb} bind def
+/col10 {0.000 0.000 0.820 srgb} bind def
+/col11 {0.530 0.810 1.000 srgb} bind def
+/col12 {0.000 0.560 0.000 srgb} bind def
+/col13 {0.000 0.690 0.000 srgb} bind def
+/col14 {0.000 0.820 0.000 srgb} bind def
+/col15 {0.000 0.560 0.560 srgb} bind def
+/col16 {0.000 0.690 0.690 srgb} bind def
+/col17 {0.000 0.820 0.820 srgb} bind def
+/col18 {0.560 0.000 0.000 srgb} bind def
+/col19 {0.690 0.000 0.000 srgb} bind def
+/col20 {0.820 0.000 0.000 srgb} bind def
+/col21 {0.560 0.000 0.560 srgb} bind def
+/col22 {0.690 0.000 0.690 srgb} bind def
+/col23 {0.820 0.000 0.820 srgb} bind def
+/col24 {0.500 0.190 0.000 srgb} bind def
+/col25 {0.630 0.250 0.000 srgb} bind def
+/col26 {0.750 0.380 0.000 srgb} bind def
+/col27 {1.000 0.500 0.500 srgb} bind def
+/col28 {1.000 0.630 0.630 srgb} bind def
+/col29 {1.000 0.750 0.750 srgb} bind def
+/col30 {1.000 0.880 0.880 srgb} bind def
+/col31 {1.000 0.840 0.000 srgb} bind def
+
+end
+save
+newpath 0 407 moveto 0 0 lineto 396 0 lineto 396 407 lineto closepath clip newpath
+-211.8 496.8 translate
+1 -1 scale
+
+/cp {closepath} bind def
+/ef {eofill} bind def
+/gr {grestore} bind def
+/gs {gsave} bind def
+/sa {save} bind def
+/rs {restore} bind def
+/l {lineto} bind def
+/m {moveto} bind def
+/rm {rmoveto} bind def
+/n {newpath} bind def
+/s {stroke} bind def
+/sh {show} bind def
+/slc {setlinecap} bind def
+/slj {setlinejoin} bind def
+/slw {setlinewidth} bind def
+/srgb {setrgbcolor} bind def
+/rot {rotate} bind def
+/sc {scale} bind def
+/sd {setdash} bind def
+/ff {findfont} bind def
+/sf {setfont} bind def
+/scf {scalefont} bind def
+/sw {stringwidth} bind def
+/tr {translate} bind def
+/tnt {dup dup currentrgbcolor
+  4 -2 roll dup 1 exch sub 3 -1 roll mul add
+  4 -2 roll dup 1 exch sub 3 -1 roll mul add
+  4 -2 roll dup 1 exch sub 3 -1 roll mul add srgb}
+  bind def
+/shd {dup dup currentrgbcolor 4 -2 roll mul 4 -2 roll mul
+  4 -2 roll mul srgb} bind def
+/$F2psBegin {$F2psDict begin /$F2psEnteredState save def} def
+/$F2psEnd {$F2psEnteredState restore end} def
+
+$F2psBegin
+10 setmiterlimit
+0 slj 0 slc
+ 0.06299 0.06299 sc
+%%EndProlog
+%
+% Fig objects follow
+%
+% 
+% here starts figure with depth 50
+% Polyline
+0 slj
+0 slc
+30.000 slw
+gs  clippath
+5143 6351 m 4873 6232 l 4819 6355 l 5088 6474 l 5088 6474 l 4910 6322 l 5143 6351 l cp
+eoclip
+n 6390 6975 m
+ 4860 6300 l gs col0 s gr gr
+
+% arrowhead
+n 5143 6351 m 4910 6322 l 5088 6474 l 5074 6394 l 5143 6351 l 
+ cp gs 0.00 setgray ef gr  col0 s
+% Polyline
+gs  clippath
+7868 6425 m 8140 6311 l 8087 6186 l 7816 6301 l 7816 6301 l 8050 6276 l 7868 6425 l cp
+eoclip
+n 6390 6975 m
+ 8100 6255 l gs col0 s gr gr
+
+% arrowhead
+n 7868 6425 m 8050 6276 l 7816 6301 l 7884 6345 l 7868 6425 l 
+ cp gs 0.00 setgray ef gr  col0 s
+% Polyline
+ [120] 0 sd
+gs  clippath
+4484 4682 m 4346 4878 l 4456 4956 l 4594 4760 l 4594 4760 l 4436 4869 l 4484 4682 l cp
+eoclip
+n 5895 2790 m
+ 4410 4905 l gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 4484 4682 m 4436 4869 l 4594 4760 l 4484 4682 l  cp gs col7 1.00 shd ef gr  col0 s
+% Polyline
+ [120] 0 sd
+gs  clippath
+7501 4817 m 7652 5004 l 7756 4919 l 7605 4732 l 7605 4732 l 7667 4915 l 7501 4817 l cp
+eoclip
+n 5940 2790 m
+ 7695 4950 l gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 7501 4817 m 7667 4915 l 7605 4732 l 7501 4817 l  cp gs col7 1.00 shd ef gr  col0 s
+% Polyline
+7.500 slw
+n 3480 4905 m 3375 4905 3375 6195 105 arcto 4 {pop} repeat
+  3375 6300 6195 6300 105 arcto 4 {pop} repeat
+  6300 6300 6300 5010 105 arcto 4 {pop} repeat
+  6300 4905 3480 4905 105 arcto 4 {pop} repeat
+ cp gs col0 s gr 
+% Polyline
+n 6810 4950 m 6705 4950 6705 6150 105 arcto 4 {pop} repeat
+  6705 6255 9525 6255 105 arcto 4 {pop} repeat
+  9630 6255 9630 5055 105 arcto 4 {pop} repeat
+  9630 4950 6810 4950 105 arcto 4 {pop} repeat
+ cp gs col0 s gr 
+% Polyline
+n 4920 6975 m 4815 6975 4815 7770 105 arcto 4 {pop} repeat
+  4815 7875 7815 7875 105 arcto 4 {pop} repeat
+  7920 7875 7920 7080 105 arcto 4 {pop} repeat
+  7920 6975 4920 6975 105 arcto 4 {pop} repeat
+ cp gs col0 s gr 
+% Polyline
+n 4500 1440 m 7695 1440 l 7695 2790 l 4500 2790 l
+ cp gs col0 s gr 
+/Times-Roman ff 190.50 scf sf
+6075 4545 m
+gs 1 -1 sc (automatic code generation) dup sw pop 2 div neg 0 rm  col0 sh gr
+/Times-Roman ff 190.50 scf sf
+5850 6615 m
+gs 1 -1 sc  328.0 rot (use) dup sw pop 2 div neg 0 rm  col0 sh gr
+/Times-Roman ff 190.50 scf sf
+7020 6615 m
+gs 1 -1 sc  30.0 rot (use) dup sw pop 2 div neg 0 rm  col0 sh gr
+/Times-Roman ff 190.50 scf sf
+4680 5850 m
+gs 1 -1 sc (for real type values.) dup sw pop 2 div neg 0 rm  col0 sh gr
+/Times-Roman ff 190.50 scf sf
+8055 5850 m
+gs 1 -1 sc (for complex type values.) dup sw pop 2 div neg 0 rm  col0 sh gr
+/Times-Roman ff 190.50 scf sf
+6435 7695 m
+gs 1 -1 sc (Generic interface for parallel) dup sw pop 2 div neg 0 rm  col0 sh gr
+/Times-Italic ff 190.50 scf sf
+5985 2160 m
+gs 1 -1 sc (Module generic implementation) dup sw pop 2 div neg 0 rm  col0 sh gr
+/Times-Italic ff 190.50 scf sf
+5940 2475 m
+gs 1 -1 sc (with tags.) dup sw pop 2 div neg 0 rm  col0 sh gr
+/Times-BoldItalic ff 222.25 scf sf
+6075 1710 m
+gs 1 -1 sc (implementparallel.Fortran) dup sw pop 2 div neg 0 rm  col0 sh gr
+/Times-Bold ff 222.25 scf sf
+4815 5175 m
+gs 1 -1 sc (module realparallel) dup sw pop 2 div neg 0 rm  col0 sh gr
+/Times-Roman ff 190.50 scf sf
+4725 5580 m
+gs 1 -1 sc (Parallel code and functions) dup sw pop 2 div neg 0 rm  col0 sh gr
+/Times-Bold ff 222.25 scf sf
+8145 5220 m
+gs 1 -1 sc (module cmplxparallel) dup sw pop 2 div neg 0 rm  col0 sh gr
+/Times-Roman ff 190.50 scf sf
+8145 5625 m
+gs 1 -1 sc (Parallel code and functions) dup sw pop 2 div neg 0 rm  col0 sh gr
+/Times-Bold ff 222.25 scf sf
+6435 7290 m
+gs 1 -1 sc (module parallel) dup sw pop 2 div neg 0 rm  col0 sh gr
+% here ends figure;
+$F2psEnd
+rs
+showpage
+%%Trailer
+%EOF
diff --git a/HySoP/src/Unstable/LEGI/doc/doxygen/images/parallel.fig b/HySoP/src/Unstable/LEGI/doc/doxygen/images/parallel.fig
new file mode 100644
index 0000000000000000000000000000000000000000..998b3e50b842201ef8b9ebd2764482e432fb0423
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/doc/doxygen/images/parallel.fig
@@ -0,0 +1,43 @@
+#FIG 3.2  Produced by xfig version 3.2.5
+Landscape
+Center
+Metric
+A4      
+100.00
+Single
+-2
+1200 2
+2 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+	2 1 3.00 135.00 180.00
+	 6390 6975 4860 6300
+2 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+	2 1 3.00 135.00 180.00
+	 6390 6975 8100 6255
+2 1 1 3 0 7 50 -1 -1 8.000 0 0 -1 1 0 2
+	1 0 3.00 135.00 180.00
+	 5895 2790 4410 4905
+2 1 1 3 0 7 50 -1 -1 8.000 0 0 -1 1 0 2
+	1 0 3.00 135.00 180.00
+	 5940 2790 7695 4950
+2 4 0 1 0 7 50 -1 -1 0.000 0 0 7 0 0 5
+	 6300 6300 6300 4905 3375 4905 3375 6300 6300 6300
+2 4 0 1 0 7 50 -1 -1 0.000 0 0 7 0 0 5
+	 9630 6255 9630 4950 6705 4950 6705 6255 9630 6255
+2 4 0 1 0 7 50 -1 -1 0.000 0 0 7 0 0 5
+	 7920 7875 7920 6975 4815 6975 4815 7875 7920 7875
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 4500 1440 7695 1440 7695 2790 4500 2790 4500 1440
+4 1 0 50 -1 0 12 0.0000 4 180 2160 6075 4545 automatic code generation\001
+4 1 0 50 -1 0 12 5.7247 4 90 270 5850 6615 use\001
+4 1 0 50 -1 0 12 0.5236 4 90 270 7020 6615 use\001
+4 1 0 50 -1 0 12 0.0000 4 180 1590 4680 5850 for real type values.\001
+4 1 0 50 -1 0 12 0.0000 4 180 1995 8055 5850 for complex type values.\001
+4 1 0 50 -1 0 12 0.0000 4 180 2310 6435 7695 Generic interface for parallel\001
+4 1 0 50 -1 1 12 0.0000 4 180 2610 5985 2160 Module generic implementation\001
+4 1 0 50 -1 1 12 0.0000 4 180 795 5940 2475 with tags.\001
+4 1 0 50 -1 3 14 0.0000 4 210 2580 6075 1710 implementparallel.Fortran\001
+4 1 0 50 -1 2 14 0.0000 4 210 1965 4815 5175 module realparallel\001
+4 1 0 50 -1 0 12 0.0000 4 135 2205 4725 5580 Parallel code and functions\001
+4 1 0 50 -1 2 14 0.0000 4 210 2190 8145 5220 module cmplxparallel\001
+4 1 0 50 -1 0 12 0.0000 4 135 2205 8145 5625 Parallel code and functions\001
+4 1 0 50 -1 2 14 0.0000 4 210 1575 6435 7290 module parallel\001
diff --git a/HySoP/src/Unstable/LEGI/doc/doxygen/images/parallel.png b/HySoP/src/Unstable/LEGI/doc/doxygen/images/parallel.png
new file mode 100644
index 0000000000000000000000000000000000000000..642d7dd9e129d8928dad1bd901b0246d42674d0c
Binary files /dev/null and b/HySoP/src/Unstable/LEGI/doc/doxygen/images/parallel.png differ
diff --git a/HySoP/src/Unstable/LEGI/doc/doxygen/images/xycommunicator.eps b/HySoP/src/Unstable/LEGI/doc/doxygen/images/xycommunicator.eps
new file mode 100644
index 0000000000000000000000000000000000000000..5d79e43685047e31f1f9daa4a6bae95279f56301
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/doc/doxygen/images/xycommunicator.eps
@@ -0,0 +1,246 @@
+%!PS-Adobe-2.0 EPSF-2.0
+%%Title: xycommunicator.fig
+%%Creator: fig2dev Version 3.2 Patchlevel 5
+%%CreationDate: Thu Jun 30 17:38:58 2011
+%%For: begou@thor (Patrick Begou)
+%%BoundingBox: 0 0 399 148
+%Magnification: 1.0000
+%%EndComments
+%%BeginProlog
+/$F2psDict 200 dict def
+$F2psDict begin
+$F2psDict /mtrx matrix put
+/col-1 {0 setgray} bind def
+/col0 {0.000 0.000 0.000 srgb} bind def
+/col1 {0.000 0.000 1.000 srgb} bind def
+/col2 {0.000 1.000 0.000 srgb} bind def
+/col3 {0.000 1.000 1.000 srgb} bind def
+/col4 {1.000 0.000 0.000 srgb} bind def
+/col5 {1.000 0.000 1.000 srgb} bind def
+/col6 {1.000 1.000 0.000 srgb} bind def
+/col7 {1.000 1.000 1.000 srgb} bind def
+/col8 {0.000 0.000 0.560 srgb} bind def
+/col9 {0.000 0.000 0.690 srgb} bind def
+/col10 {0.000 0.000 0.820 srgb} bind def
+/col11 {0.530 0.810 1.000 srgb} bind def
+/col12 {0.000 0.560 0.000 srgb} bind def
+/col13 {0.000 0.690 0.000 srgb} bind def
+/col14 {0.000 0.820 0.000 srgb} bind def
+/col15 {0.000 0.560 0.560 srgb} bind def
+/col16 {0.000 0.690 0.690 srgb} bind def
+/col17 {0.000 0.820 0.820 srgb} bind def
+/col18 {0.560 0.000 0.000 srgb} bind def
+/col19 {0.690 0.000 0.000 srgb} bind def
+/col20 {0.820 0.000 0.000 srgb} bind def
+/col21 {0.560 0.000 0.560 srgb} bind def
+/col22 {0.690 0.000 0.690 srgb} bind def
+/col23 {0.820 0.000 0.820 srgb} bind def
+/col24 {0.500 0.190 0.000 srgb} bind def
+/col25 {0.630 0.250 0.000 srgb} bind def
+/col26 {0.750 0.380 0.000 srgb} bind def
+/col27 {1.000 0.500 0.500 srgb} bind def
+/col28 {1.000 0.630 0.630 srgb} bind def
+/col29 {1.000 0.750 0.750 srgb} bind def
+/col30 {1.000 0.880 0.880 srgb} bind def
+/col31 {1.000 0.840 0.000 srgb} bind def
+
+end
+save
+newpath 0 148 moveto 0 0 lineto 399 0 lineto 399 148 lineto closepath clip newpath
+-70.0 259.6 translate
+1 -1 scale
+
+/cp {closepath} bind def
+/ef {eofill} bind def
+/gr {grestore} bind def
+/gs {gsave} bind def
+/sa {save} bind def
+/rs {restore} bind def
+/l {lineto} bind def
+/m {moveto} bind def
+/rm {rmoveto} bind def
+/n {newpath} bind def
+/s {stroke} bind def
+/sh {show} bind def
+/slc {setlinecap} bind def
+/slj {setlinejoin} bind def
+/slw {setlinewidth} bind def
+/srgb {setrgbcolor} bind def
+/rot {rotate} bind def
+/sc {scale} bind def
+/sd {setdash} bind def
+/ff {findfont} bind def
+/sf {setfont} bind def
+/scf {scalefont} bind def
+/sw {stringwidth} bind def
+/tr {translate} bind def
+/tnt {dup dup currentrgbcolor
+  4 -2 roll dup 1 exch sub 3 -1 roll mul add
+  4 -2 roll dup 1 exch sub 3 -1 roll mul add
+  4 -2 roll dup 1 exch sub 3 -1 roll mul add srgb}
+  bind def
+/shd {dup dup currentrgbcolor 4 -2 roll mul 4 -2 roll mul
+  4 -2 roll mul srgb} bind def
+/$F2psBegin {$F2psDict begin /$F2psEnteredState save def} def
+/$F2psEnd {$F2psEnteredState restore end} def
+
+$F2psBegin
+10 setmiterlimit
+0 slj 0 slc
+ 0.06299 0.06299 sc
+%%EndProlog
+%
+% Fig objects follow
+%
+% 
+% here starts figure with depth 51
+% Polyline
+2 slj
+0 slc
+7.500 slw
+gs  clippath
+4360 3919 m 4556 3788 l 4490 3688 l 4294 3819 l 4294 3819 l 4499 3755 l 4360 3919 l cp
+eoclip
+n 3185 3362 m 3183 3364 l 3179 3368 l 3171 3374 l 3161 3385 l 3147 3398 l
+ 3131 3414 l 3113 3433 l 3095 3453 l 3077 3475 l 3060 3499 l
+ 3044 3524 l 3029 3550 l 3017 3579 l 3008 3609 l 3002 3643 l
+ 3001 3679 l 3004 3716 l 3011 3746 l 3020 3775 l 3030 3801 l
+ 3040 3824 l 3049 3843 l 3058 3859 l 3065 3872 l 3071 3883 l
+ 3077 3892 l 3082 3900 l 3087 3907 l 3092 3914 l 3099 3922 l
+ 3107 3929 l 3118 3939 l 3131 3949 l 3148 3961 l 3169 3975 l
+ 3194 3991 l 3225 4008 l 3259 4025 l 3298 4041 l 3335 4054 l
+ 3372 4064 l 3407 4073 l 3438 4080 l 3465 4085 l 3489 4089 l
+ 3508 4092 l 3523 4094 l 3536 4095 l 3547 4096 l 3556 4097 l
+ 3564 4097 l 3573 4096 l 3583 4096 l 3594 4095 l 3608 4094 l
+ 3626 4092 l 3648 4089 l 3675 4086 l 3708 4081 l 3746 4076 l
+ 3789 4068 l 3836 4059 l 3886 4048 l 3933 4036 l 3978 4022 l
+ 4021 4008 l 4063 3993 l 4102 3978 l 4138 3963 l 4173 3947 l
+ 4206 3931 l 4238 3915 l 4268 3899 l 4297 3883 l 4325 3867 l
+ 4351 3851 l 4377 3836 l 4400 3821 l 4422 3807 l 4442 3794 l
+ 4460 3782 l 4475 3772 l 4487 3764 l 4496 3757 l
+ 4511 3747 l gs col0 s gr gr
+
+% arrowhead
+0 slj
+n 4360 3919 m 4499 3755 l 4294 3819 l 4361 3846 l 4360 3919 l 
+ cp gs 0.00 setgray ef gr  col0 s
+% Polyline
+15.000 slw
+n 4500 2700 m 5850 2700 l 5850 3600 l 4500 3600 l
+ cp gs col0 s gr 
+% Polyline
+n 4500 2700 m 5850 1800 l 7200 1800 l 5850 2700 l 5850 3600 l 7200 2700 l
+
+ 7200 1800 l gs col0 s gr 
+% Polyline
+7.500 slw
+n 4500 3150 m
+ 5850 3150 l gs col0 s gr 
+% Polyline
+n 5850 3150 m
+ 7200 2250 l gs col0 s gr 
+% Polyline
+15.000 slw
+n 1350 2700 m 2700 2700 l 2700 3600 l 1350 3600 l
+ cp gs col0 s gr 
+% Polyline
+n 1350 2700 m 2700 1800 l 4050 1800 l 2700 2700 l 2700 3600 l 4050 2700 l
+
+ 4050 1800 l gs col0 s gr 
+% Polyline
+7.500 slw
+n 1800 3600 m 1800 2700 l
+ 3150 1800 l gs col0 s gr 
+% Polyline
+n 1350 3150 m
+ 2700 3150 l gs col0 s gr 
+% Polyline
+n 2700 3150 m
+ 4050 2250 l gs col0 s gr 
+% Polyline
+n 6300 3285 m 6300 2385 l
+ 4950 2385 l gs col0 s gr 
+% Polyline
+n 6750 3015 m 6750 2115 l
+ 5355 2115 l gs col0 s gr 
+% Polyline
+n 2250 3600 m 2250 2700 l
+ 3600 1800 l gs col0 s gr 
+/Times-Roman ff 190.50 scf sf
+6030 2835 m
+gs 1 -1 sc (3) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+1980 3015 m
+gs 1 -1 sc (4) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+2430 3015 m
+gs 1 -1 sc (3) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+1530 3015 m
+gs 1 -1 sc (5) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+2430 3420 m
+gs 1 -1 sc (0) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+1980 3420 m
+gs 1 -1 sc (1) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+1530 3420 m
+gs 1 -1 sc (2) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+6030 3330 m
+gs 1 -1 sc (0) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+6480 3015 m
+gs 1 -1 sc (1) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+6930 2700 m
+gs 1 -1 sc (2) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+6930 2250 m
+gs 1 -1 sc (5) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+6435 2565 m
+gs 1 -1 sc (4) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+1170 3645 m
+gs 1 -1 sc (Y) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+4140 2700 m
+gs 1 -1 sc (X) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+2610 2655 m
+gs 1 -1 sc (Z) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+5760 2610 m
+gs 1 -1 sc (Z) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+4275 3690 m
+gs 1 -1 sc (Y) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+7290 2700 m
+gs 1 -1 sc (X) col0 sh gr
+/Times-BoldItalic ff 190.50 scf sf
+1755 3780 m
+gs 1 -1 sc (ncpus1) col0 sh gr
+/Times-BoldItalic ff 190.50 scf sf
+4455 3420 m
+gs 1 -1 sc  90.0 rot (ncpus2) col0 sh gr
+/Times-BoldItalic ff 190.50 scf sf
+1260 3375 m
+gs 1 -1 sc  90.0 rot (ncpus2) col0 sh gr
+/Times-BoldItalic ff 190.50 scf sf
+6460 3388 m
+gs 1 -1 sc  30.0 rot (ncpus1) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+4500 4052 m
+gs 1 -1 sc (alongY distribution) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+1342 4044 m
+gs 1 -1 sc (alongX distribution) col0 sh gr
+% here ends figure;
+$F2psEnd
+rs
+showpage
+%%Trailer
+%EOF
diff --git a/HySoP/src/Unstable/LEGI/doc/doxygen/images/xycommunicator.fig b/HySoP/src/Unstable/LEGI/doc/doxygen/images/xycommunicator.fig
new file mode 100644
index 0000000000000000000000000000000000000000..3ff50e2244cd087a23437f3b954bfd7d2aaea54e
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/doc/doxygen/images/xycommunicator.fig
@@ -0,0 +1,63 @@
+#FIG 3.2  Produced by xfig version 3.2.5
+Landscape
+Center
+Metric
+A4      
+100.00
+Single
+-2
+1200 2
+2 2 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 4500 2700 5850 2700 5850 3600 4500 3600 4500 2700
+2 1 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 7
+	 4500 2700 5850 1800 7200 1800 5850 2700 5850 3600 7200 2700
+	 7200 1800
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 4500 3150 5850 3150
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 5850 3150 7200 2250
+2 2 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 1350 2700 2700 2700 2700 3600 1350 3600 1350 2700
+2 1 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 7
+	 1350 2700 2700 1800 4050 1800 2700 2700 2700 3600 4050 2700
+	 4050 1800
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 3
+	 1800 3600 1800 2700 3150 1800
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 1350 3150 2700 3150
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 2700 3150 4050 2250
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 3
+	 6300 3285 6300 2385 4950 2385
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 3
+	 6750 3015 6750 2115 5355 2115
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 3
+	 2250 3600 2250 2700 3600 1800
+3 2 0 1 0 7 51 -1 -1 0.000 0 1 0 5
+	2 1 1.00 120.00 165.00
+	 3185 3362 3004 3716 3298 4041 3886 4048 4511 3747
+	 0.000 -1.000 -1.000 -1.000 0.000
+4 0 0 50 -1 0 12 0.0000 4 135 105 6030 2835 3\001
+4 0 0 50 -1 0 12 0.0000 4 135 105 1980 3015 4\001
+4 0 0 50 -1 0 12 0.0000 4 135 105 2430 3015 3\001
+4 0 0 50 -1 0 12 0.0000 4 135 105 1530 3015 5\001
+4 0 0 50 -1 0 12 0.0000 4 135 105 2430 3420 0\001
+4 0 0 50 -1 0 12 0.0000 4 135 105 1980 3420 1\001
+4 0 0 50 -1 0 12 0.0000 4 135 105 1530 3420 2\001
+4 0 0 50 -1 0 12 0.0000 4 135 105 6030 3330 0\001
+4 0 0 50 -1 0 12 0.0000 4 135 105 6480 3015 1\001
+4 0 0 50 -1 0 12 0.0000 4 135 105 6930 2700 2\001
+4 0 0 50 -1 0 12 0.0000 4 135 105 6930 2250 5\001
+4 0 0 50 -1 0 12 0.0000 4 135 105 6435 2565 4\001
+4 0 0 50 -1 0 12 0.0000 4 135 135 1170 3645 Y\001
+4 0 0 50 -1 0 12 0.0000 4 135 135 4140 2700 X\001
+4 0 0 50 -1 0 12 0.0000 4 135 120 2610 2655 Z\001
+4 0 0 50 -1 0 12 0.0000 4 135 120 5760 2610 Z\001
+4 0 0 50 -1 0 12 0.0000 4 135 135 4275 3690 Y\001
+4 0 0 50 -1 0 12 0.0000 4 135 135 7290 2700 X\001
+4 0 0 50 -1 3 12 0.0000 4 165 585 1755 3780 ncpus1\001
+4 0 0 50 -1 3 12 1.5708 4 165 585 4455 3420 ncpus2\001
+4 0 0 50 -1 3 12 1.5708 4 165 585 1260 3375 ncpus2\001
+4 0 0 50 -1 3 12 0.5236 4 165 585 6460 3388 ncpus1\001
+4 0 0 50 -1 0 12 0.0000 4 180 1605 4500 4052 alongY distribution\001
+4 0 0 50 -1 0 12 0.0000 4 180 1605 1342 4044 alongX distribution\001
diff --git a/HySoP/src/Unstable/LEGI/doc/doxygen/images/xycommunicator.png b/HySoP/src/Unstable/LEGI/doc/doxygen/images/xycommunicator.png
new file mode 100644
index 0000000000000000000000000000000000000000..7237fe05aba08ddab9531a36db3f7a946407faf9
Binary files /dev/null and b/HySoP/src/Unstable/LEGI/doc/doxygen/images/xycommunicator.png differ
diff --git a/HySoP/src/Unstable/LEGI/doc/doxygen/images/yzcommunicator.eps b/HySoP/src/Unstable/LEGI/doc/doxygen/images/yzcommunicator.eps
new file mode 100644
index 0000000000000000000000000000000000000000..a8dece8fba6263b1f3649b01457f5d9a2e642a74
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/doc/doxygen/images/yzcommunicator.eps
@@ -0,0 +1,230 @@
+%!PS-Adobe-2.0 EPSF-2.0
+%%Title: yzcommunicator.fig
+%%Creator: fig2dev Version 3.2 Patchlevel 5
+%%CreationDate: Thu Jun 30 17:40:20 2011
+%%For: begou@thor (Patrick Begou)
+%%BoundingBox: 0 0 414 149
+%Magnification: 1.0000
+%%EndComments
+%%BeginProlog
+/$F2psDict 200 dict def
+$F2psDict begin
+$F2psDict /mtrx matrix put
+/col-1 {0 setgray} bind def
+/col0 {0.000 0.000 0.000 srgb} bind def
+/col1 {0.000 0.000 1.000 srgb} bind def
+/col2 {0.000 1.000 0.000 srgb} bind def
+/col3 {0.000 1.000 1.000 srgb} bind def
+/col4 {1.000 0.000 0.000 srgb} bind def
+/col5 {1.000 0.000 1.000 srgb} bind def
+/col6 {1.000 1.000 0.000 srgb} bind def
+/col7 {1.000 1.000 1.000 srgb} bind def
+/col8 {0.000 0.000 0.560 srgb} bind def
+/col9 {0.000 0.000 0.690 srgb} bind def
+/col10 {0.000 0.000 0.820 srgb} bind def
+/col11 {0.530 0.810 1.000 srgb} bind def
+/col12 {0.000 0.560 0.000 srgb} bind def
+/col13 {0.000 0.690 0.000 srgb} bind def
+/col14 {0.000 0.820 0.000 srgb} bind def
+/col15 {0.000 0.560 0.560 srgb} bind def
+/col16 {0.000 0.690 0.690 srgb} bind def
+/col17 {0.000 0.820 0.820 srgb} bind def
+/col18 {0.560 0.000 0.000 srgb} bind def
+/col19 {0.690 0.000 0.000 srgb} bind def
+/col20 {0.820 0.000 0.000 srgb} bind def
+/col21 {0.560 0.000 0.560 srgb} bind def
+/col22 {0.690 0.000 0.690 srgb} bind def
+/col23 {0.820 0.000 0.820 srgb} bind def
+/col24 {0.500 0.190 0.000 srgb} bind def
+/col25 {0.630 0.250 0.000 srgb} bind def
+/col26 {0.750 0.380 0.000 srgb} bind def
+/col27 {1.000 0.500 0.500 srgb} bind def
+/col28 {1.000 0.630 0.630 srgb} bind def
+/col29 {1.000 0.750 0.750 srgb} bind def
+/col30 {1.000 0.880 0.880 srgb} bind def
+/col31 {1.000 0.840 0.000 srgb} bind def
+
+end
+save
+newpath 0 149 moveto 0 0 lineto 414 0 lineto 414 149 lineto closepath clip newpath
+-268.3 260.1 translate
+1 -1 scale
+
+/cp {closepath} bind def
+/ef {eofill} bind def
+/gr {grestore} bind def
+/gs {gsave} bind def
+/sa {save} bind def
+/rs {restore} bind def
+/l {lineto} bind def
+/m {moveto} bind def
+/rm {rmoveto} bind def
+/n {newpath} bind def
+/s {stroke} bind def
+/sh {show} bind def
+/slc {setlinecap} bind def
+/slj {setlinejoin} bind def
+/slw {setlinewidth} bind def
+/srgb {setrgbcolor} bind def
+/rot {rotate} bind def
+/sc {scale} bind def
+/sd {setdash} bind def
+/ff {findfont} bind def
+/sf {setfont} bind def
+/scf {scalefont} bind def
+/sw {stringwidth} bind def
+/tr {translate} bind def
+/tnt {dup dup currentrgbcolor
+  4 -2 roll dup 1 exch sub 3 -1 roll mul add
+  4 -2 roll dup 1 exch sub 3 -1 roll mul add
+  4 -2 roll dup 1 exch sub 3 -1 roll mul add srgb}
+  bind def
+/shd {dup dup currentrgbcolor 4 -2 roll mul 4 -2 roll mul
+  4 -2 roll mul srgb} bind def
+/$F2psBegin {$F2psDict begin /$F2psEnteredState save def} def
+/$F2psEnd {$F2psEnteredState restore end} def
+
+$F2psBegin
+10 setmiterlimit
+0 slj 0 slc
+ 0.06299 0.06299 sc
+%%EndProlog
+%
+% Fig objects follow
+%
+% 
+% here starts figure with depth 51
+% Arc
+7.500 slw
+0 slc
+gs  clippath
+6695 3364 m 6574 3566 l 6677 3627 l 6798 3425 l 6798 3425 l 6641 3572 l 6695 3364 l cp
+eoclip
+n 7125.6 3774.7 527.3 35.6485 -158.7948 arcn
+gs col0 s gr
+ gr
+
+% arrowhead
+0 slj
+n 6695 3364 m 6641 3572 l 6798 3425 l 6725 3430 l 6695 3364 l 
+ cp gs 0.00 setgray ef gr  col0 s
+% Polyline
+15.000 slw
+n 4500 2700 m 5850 2700 l 5850 3600 l 4500 3600 l
+ cp gs col0 s gr 
+% Polyline
+n 4500 2700 m 5850 1800 l 7200 1800 l 5850 2700 l 5850 3600 l 7200 2700 l
+
+ 7200 1800 l gs col0 s gr 
+% Polyline
+7.500 slw
+n 4500 3150 m
+ 5850 3150 l gs col0 s gr 
+% Polyline
+n 5850 3150 m
+ 7200 2250 l gs col0 s gr 
+% Polyline
+n 6300 3285 m 6300 2385 l
+ 4950 2385 l gs col0 s gr 
+% Polyline
+n 6750 3015 m 6750 2115 l
+ 5355 2115 l gs col0 s gr 
+% Polyline
+15.000 slw
+n 7863 2702 m 9213 2702 l 9213 3602 l 7863 3602 l
+ cp gs col0 s gr 
+% Polyline
+n 7863 2702 m 9213 1802 l 10563 1802 l 9213 2702 l 9213 3602 l 10563 2702 l
+
+ 10563 1802 l gs col0 s gr 
+% Polyline
+7.500 slw
+n 9879 1790 m
+ 8533 2694 l gs col0 s gr 
+% Polyline
+n 10122 3003 m 10122 2084 l
+ 8761 2077 l gs col0 s gr 
+% Polyline
+n 9680 3305 m 9680 2386 l
+ 8313 2393 l gs col0 s gr 
+% Polyline
+n 8548 2687 m
+ 8548 3606 l gs col0 s gr 
+/Times-Roman ff 190.50 scf sf
+6030 2835 m
+gs 1 -1 sc (3) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+6030 3330 m
+gs 1 -1 sc (0) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+6480 3015 m
+gs 1 -1 sc (1) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+6930 2700 m
+gs 1 -1 sc (2) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+6930 2250 m
+gs 1 -1 sc (5) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+6435 2565 m
+gs 1 -1 sc (4) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+5760 2610 m
+gs 1 -1 sc (Z) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+4275 3690 m
+gs 1 -1 sc (Y) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+7290 2700 m
+gs 1 -1 sc (X) col0 sh gr
+/Times-BoldItalic ff 190.50 scf sf
+4455 3420 m
+gs 1 -1 sc  90.0 rot (ncpus2) col0 sh gr
+/Times-BoldItalic ff 190.50 scf sf
+6460 3388 m
+gs 1 -1 sc  30.0 rot (ncpus1) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+10671 2692 m
+gs 1 -1 sc (X) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+7683 3647 m
+gs 1 -1 sc (Y) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+9031 2614 m
+gs 1 -1 sc (0) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+9544 2304 m
+gs 1 -1 sc (1) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+9918 2003 m
+gs 1 -1 sc (2) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+8347 2591 m
+gs 1 -1 sc (3) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+8817 2282 m
+gs 1 -1 sc (4) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+9264 2002 m
+gs 1 -1 sc (5) col0 sh gr
+/Times-BoldItalic ff 190.50 scf sf
+9804 3456 m
+gs 1 -1 sc  30.0 rot (ncpus1) col0 sh gr
+/Times-BoldItalic ff 190.50 scf sf
+8270 3780 m
+gs 1 -1 sc (ncpus2) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+9049 2907 m
+gs 1 -1 sc (Z) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+4512 4060 m
+gs 1 -1 sc (alongY distribution) col0 sh gr
+/Times-Roman ff 190.50 scf sf
+7853 4037 m
+gs 1 -1 sc (alongZ distribution) col0 sh gr
+% here ends figure;
+$F2psEnd
+rs
+showpage
+%%Trailer
+%EOF
diff --git a/HySoP/src/Unstable/LEGI/doc/doxygen/images/yzcommunicator.fig b/HySoP/src/Unstable/LEGI/doc/doxygen/images/yzcommunicator.fig
new file mode 100644
index 0000000000000000000000000000000000000000..be95b8cffe359a9ad59111684da9bf0933d36236
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/doc/doxygen/images/yzcommunicator.fig
@@ -0,0 +1,61 @@
+#FIG 3.2  Produced by xfig version 3.2.5
+Landscape
+Center
+Metric
+A4      
+100.00
+Single
+-2
+1200 2
+5 1 0 1 0 7 51 -1 -1 0.000 0 1 1 0 7125.552 3774.712 7554 4082 7418 3336 6634 3584
+	2 1 1.00 120.00 165.00
+2 2 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 4500 2700 5850 2700 5850 3600 4500 3600 4500 2700
+2 1 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 7
+	 4500 2700 5850 1800 7200 1800 5850 2700 5850 3600 7200 2700
+	 7200 1800
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 4500 3150 5850 3150
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 5850 3150 7200 2250
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 3
+	 6300 3285 6300 2385 4950 2385
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 3
+	 6750 3015 6750 2115 5355 2115
+2 2 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 7863 2702 9213 2702 9213 3602 7863 3602 7863 2702
+2 1 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 7
+	 7863 2702 9213 1802 10563 1802 9213 2702 9213 3602 10563 2702
+	 10563 1802
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 9879 1790 8533 2694
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 3
+	 10122 3003 10122 2084 8761 2077
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 3
+	 9680 3305 9680 2386 8313 2393
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 8548 2687 8548 3606
+4 0 0 50 -1 0 12 0.0000 4 135 105 6030 2835 3\001
+4 0 0 50 -1 0 12 0.0000 4 135 105 6030 3330 0\001
+4 0 0 50 -1 0 12 0.0000 4 135 105 6480 3015 1\001
+4 0 0 50 -1 0 12 0.0000 4 135 105 6930 2700 2\001
+4 0 0 50 -1 0 12 0.0000 4 135 105 6930 2250 5\001
+4 0 0 50 -1 0 12 0.0000 4 135 105 6435 2565 4\001
+4 0 0 50 -1 0 12 0.0000 4 135 120 5760 2610 Z\001
+4 0 0 50 -1 0 12 0.0000 4 135 135 4275 3690 Y\001
+4 0 0 50 -1 0 12 0.0000 4 135 135 7290 2700 X\001
+4 0 0 50 -1 3 12 1.5708 4 165 585 4455 3420 ncpus2\001
+4 0 0 50 -1 3 12 0.5236 4 165 585 6460 3388 ncpus1\001
+4 0 0 50 -1 0 12 0.0000 4 135 135 10671 2692 X\001
+4 0 0 50 -1 0 12 0.0000 4 135 135 7683 3647 Y\001
+4 0 0 50 -1 0 12 0.0000 4 135 105 9031 2614 0\001
+4 0 0 50 -1 0 12 0.0000 4 135 105 9544 2304 1\001
+4 0 0 50 -1 0 12 0.0000 4 135 105 9918 2003 2\001
+4 0 0 50 -1 0 12 0.0000 4 135 105 8347 2591 3\001
+4 0 0 50 -1 0 12 0.0000 4 135 105 8817 2282 4\001
+4 0 0 50 -1 0 12 0.0000 4 135 105 9264 2002 5\001
+4 0 0 50 -1 3 12 0.5236 4 165 585 9804 3456 ncpus1\001
+4 0 0 50 -1 3 12 0.0000 4 165 585 8270 3780 ncpus2\001
+4 0 0 50 -1 0 12 0.0000 4 135 120 9049 2907 Z\001
+4 0 0 50 -1 0 12 0.0000 4 180 1605 4512 4060 alongY distribution\001
+4 0 0 50 -1 0 12 0.0000 4 180 1590 7853 4037 alongZ distribution\001
diff --git a/HySoP/src/Unstable/LEGI/doc/doxygen/images/yzcommunicator.png b/HySoP/src/Unstable/LEGI/doc/doxygen/images/yzcommunicator.png
new file mode 100644
index 0000000000000000000000000000000000000000..6a87434637ccec072559ae23894300d310764ca4
Binary files /dev/null and b/HySoP/src/Unstable/LEGI/doc/doxygen/images/yzcommunicator.png differ
diff --git a/HySoP/src/Unstable/LEGI/example/CMakeLists.txt b/HySoP/src/Unstable/LEGI/example/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..febd4f0ab6f826fc669a9047b2c86fd7dc8c351d
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/example/CMakeLists.txt
@@ -0,0 +1 @@
+add_subdirectory(src)
diff --git a/HySoP/src/Unstable/LEGI/example/advec/shear_tag b/HySoP/src/Unstable/LEGI/example/advec/shear_tag
new file mode 100755
index 0000000000000000000000000000000000000000..6d8fb7de7294159d887edb1d63b56d5bc437afd2
Binary files /dev/null and b/HySoP/src/Unstable/LEGI/example/advec/shear_tag differ
diff --git a/HySoP/src/Unstable/LEGI/example/advec/turn_sphere b/HySoP/src/Unstable/LEGI/example/advec/turn_sphere
new file mode 100755
index 0000000000000000000000000000000000000000..5f9e61b8db4885a59e619527d4779ccdf4113100
Binary files /dev/null and b/HySoP/src/Unstable/LEGI/example/advec/turn_sphere differ
diff --git a/HySoP/src/Unstable/LEGI/example/src/CMakeLists.txt b/HySoP/src/Unstable/LEGI/example/src/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e12c5b53904c7ffd8d5fdaa105a66000751e62e3
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/example/src/CMakeLists.txt
@@ -0,0 +1,45 @@
+set(EXECUTABLE_OUTPUT_PATH "${EXAMPLE_EXE_DIR}")
+include_directories(${CMAKE_Fortran_MODULE_DIRECTORY})
+
+# ===== Examples and benchmarks for the advection equation and the particle solver =====
+set(EXECUTABLE_OUTPUT_PATH "${EXAMPLE_EXE_DIR}/advec")
+# --- Simple turning sphere ---
+set(EXAMPLE_NAME turn_sphere)
+    # Test file
+    list(APPEND ${EXAMPLE_NAME}_SRC "advec/${EXAMPLE_NAME}.f90")
+    list(APPEND ${EXAMPLE_NAME}_SRC "../../test/src/test_common.f90")
+    list(APPEND ${EXAMPLE_NAME}_SRC "${${EXE_NAME}_SRCDIRS}/input_output/vtkxml.f90")
+    # General environment and utilities (mesh, io, ...)
+    file(GLOB ${EXAMPLE_NAME}_LIB ${${EXE_NAME}_SRCDIRS}/layout/cart*.f90 )
+    if(${EXAMPLE_NAME}_LIB)
+        list(APPEND ${EXAMPLE_NAME}_SRC ${${EXAMPLE_NAME}_LIB})
+    endif()
+    # Particle solver
+    file(GLOB ${EXAMPLE_NAME}_LIB  ${${EXE_NAME}_SRCDIRS}/particle/*.f90)
+    if(${EXAMPLE_NAME}_LIB)
+        list(APPEND ${EXAMPLE_NAME}_SRC ${${EXAMPLE_NAME}_LIB})
+    endif()
+add_executable(${EXAMPLE_NAME} ${${EXAMPLE_NAME}_SRC})
+target_link_libraries(${EXAMPLE_NAME} ${LIBS})
+
+# --- Test case with shear ---
+# In this example the radial component of the velocity vanishes and, since the
+# scalar is initialized with cylindrical symmetry, it stays constant in time.
+# This allows comparing solvers at large time steps (e.g. a CFL number greater than one).
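+# For reference, a sketch of the analytic setup (taken from shear_tag.f90, with
+# (rx,ry) measured from the domain centre and rr = rx**2 + ry**2):
+#     Vx = -ry*cos(3*pi*sqrt(rr)/2)
+#     Vy =  rx*cos(3*pi*sqrt(rr)/2)
+# Hence Vx*rx + Vy*ry = 0: the flow is purely azimuthal, and the radially
+# symmetric scalar (1-rr)**6 is an exact steady solution.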
+set(EXAMPLE_NAME shear_tag)
+    # Test file
+    list(APPEND ${EXAMPLE_NAME}_SRC "advec/${EXAMPLE_NAME}.f90")
+    list(APPEND ${EXAMPLE_NAME}_SRC "../../test/src/test_common.f90")
+    list(APPEND ${EXAMPLE_NAME}_SRC "${${EXE_NAME}_SRCDIRS}/input_output/vtkxml.f90")
+    # General environment and utilities (mesh, io, ...)
+    file(GLOB ${EXAMPLE_NAME}_LIB ${${EXE_NAME}_SRCDIRS}/layout/cart*.f90 )
+    if(${EXAMPLE_NAME}_LIB)
+        list(APPEND ${EXAMPLE_NAME}_SRC ${${EXAMPLE_NAME}_LIB})
+    endif()
+    # Particle solver
+    file(GLOB ${EXAMPLE_NAME}_LIB  ${${EXE_NAME}_SRCDIRS}/particle/*.f90)
+    if(${EXAMPLE_NAME}_LIB)
+        list(APPEND ${EXAMPLE_NAME}_SRC ${${EXAMPLE_NAME}_LIB})
+    endif()
+add_executable(${EXAMPLE_NAME} ${${EXAMPLE_NAME}_SRC})
+target_link_libraries(${EXAMPLE_NAME} ${LIBS})
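+
+# Typical launch (an illustration only; the exact command depends on the local
+# MPI installation, and the process count must satisfy the divisibility checks
+# coded in each example):
+#     mpirun -np 4 ${EXAMPLE_EXE_DIR}/advec/shear_tag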
+
diff --git a/HySoP/src/Unstable/LEGI/example/src/advec/shear_tag.f90 b/HySoP/src/Unstable/LEGI/example/src/advec/shear_tag.f90
new file mode 100644
index 0000000000000000000000000000000000000000..4451438c6276eaed889649aee5064632985e70f9
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/example/src/advec/shear_tag.f90
@@ -0,0 +1,161 @@
+!------------------------------------------------------------------------------
+!
+! PROGRAM : advec_sheartag
+!
+! DESCRIPTION:
+!> This program provides a numerical illustration of an advection problem: a
+!! velocity field with radial shear. This test illustrates the effect of
+!! corrected remeshing formulas.
+!!
+!! @details
+!! This example can be used as a benchmark (to measure optimisation
+!! efficiency or parallel scalability). The velocity field is chosen to ensure
+!! that the solver will tag particles and therefore use corrected remeshing
+!! formulas. It is a typical test case for evaluating the efficiency of such
+!! formulas at large CFL numbers (e.g. 3 or more).
+!! Note that the analytic solution is known and can be checked.
+!!
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+program advec_sheartag
+
+    ! External Library
+    use mpi
+    ! Scales code
+    use cart_topology
+    use advec
+    use vtkxml
+    ! Test procedures
+    use test_common
+
+    implicit none
+
+    logical, parameter      :: output = .false.     ! Enable output for
+                                                    ! visualisation purposes?
+    real(WP), parameter     :: M_PI = ACOS(-1.0)    ! Pi value
+    real(WP),parameter      :: period = 1
+
+    logical     :: success = .true. ! test success flag
+    integer     :: ierr             ! mpi error code
+    integer     :: rank_world       ! process rank in "MPI_COMM_WORLD"
+    integer     :: nb_proc,nb_procZ ! number of processes
+
+    character(str_short)                        :: order='p_O2'     ! space order of the solver
+    real(WP), dimension(:, :, :), allocatable   :: Vx, Vy, Vz       ! the flow
+    real(WP), dimension(:, :, :), allocatable   :: scal3D           ! the scalar field
+    integer                                     :: i,j,k            ! mesh indices
+    real(WP)                                    :: T_step           ! time at which output is written
+    real(WP)                                    :: T_end            ! final time
+    real(WP)                                    :: T                ! current time
+    real(WP)                                    :: dt               ! time step
+    real(WP), dimension(:, :, :), allocatable   :: good_scal        ! analytic solution
+    integer                                     :: tag_num,tag_err  ! identifiers for io
+    integer                                     :: tag_sol          ! identifier for io
+    real(WP)                                    :: rx, ry, rr
+    integer, parameter                          :: N_mesh=400       ! number of mesh points (in x and y)
+   
+    ! ===== Initialisation of parallel context =====
+
+    ! Set the verbosity
+    verbose_test = .true.
+    verbose_more = .true.
+    ! Initialise mpi
+    call mpi_init(ierr)    
+    call mpi_comm_rank(MPI_COMM_WORLD, rank_world, ierr)
+    call mpi_comm_size(MPI_COMM_WORLD, nb_proc, ierr)
+
+    ! ===== Cut the domain along y and initialize the topology =====
+    nb_procZ = 1
+    if (mod(N_mesh, nb_proc)/=0) stop 'wrong number of processes: it must divide the mesh size (400)'
+    call cart_create((/ nb_proc, nb_procZ /), ierr)
+
+    ! ===== Create mesh =====
+    call discretisation_create(N_mesh,N_mesh,20,dble(2),dble(2),dble(0.5))
+    call set_group_size(5)
+    call advec_init(order)
+    call mpi_barrier(MPI_COMM_WORLD, ierr)
+
+    ! ===== Field allocation =====
+    allocate(scal3D(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(good_scal(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(Vx(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(Vy(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(Vz(N_proc(1), N_proc(2), N_proc(3)))
+
+    ! ===== Initialization ====
+    Vx = 0.0
+    Vy = 0.0
+    Vz = 0.0
+    do k = 1, N_proc(3)
+        do j = 1, N_proc(2)
+            ry = ((j-1+coord(2)*N_proc(2))*d_sc(2)) - length(2)/2.0
+            do i = 1, N_proc(1)
+                rx = ((i-1+coord(1)*N_proc(1))*d_sc(1))-length(1)/2.0
+                rr = (rx**2+ry**2)
+                if (rr<1) then
+                    scal3D(i,j,k) = (1-rr)**6
+                else
+                    scal3D(i,j,k) = 0
+                end if
+                Vx(i,j,k) = cos(3*M_PI*sqrt(rr)/2)*(-ry)
+                Vy(i,j,k) = cos(3*M_PI*sqrt(rr)/2)*(rx)
+            end do
+        end do
+    end do
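+    ! The radial velocity component vanishes (Vx*rx + Vy*ry = 0), so this
+    ! radially symmetric scalar is an exact steady solution: the analytic
+    ! solution is simply the initial field.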
+    good_scal = scal3D
+    T = 0.0
+    dt = 3*min(d_sc(1),d_sc(2))
+    T_step = 0.0
+    T_end = 0.8
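+    ! A note on the step size: dt is three mesh steps and the azimuthal
+    ! velocity above is of order one, so this corresponds to a CFL number
+    ! of about 3.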
+    call test_substatus('cfl=3 and dt', dt, cart_rank)
+
+    ! ===== Create output context and write the analytic solution =====
+    if (output) then
+        call vtkxml_init_all(9, nb_proc_dim, length, cart_rank, coord,'./adv_res/')
+        call vtkxml_init_field(trim(order)//'_tag_num', tag_num)
+        call vtkxml_init_field(trim(order)//'_tag_err', tag_err)
+        call vtkxml_init_field(trim(order)//'_tag_sol', tag_sol)
+        call vtkxml_write(tag_sol, good_scal)
+    end if
+
+
+    ! ===== Solve equation ====
+    do while(T< (T_end - dt))
+        call advec_step(dt, Vx, Vy, Vz, scal3D)
+        T = T + dt
+        if (T>T_step) then
+            T_step = T_step + period/10
+            call test_substatus ('tag, t', T, cart_rank)
+            if (output) call vtkxml_write(tag_num, scal3D)
+        end if
+    end do
+    if (T<T_end) then
+        dt = T_end - T
+        call advec_step(dt, Vx, Vy, Vz, scal3D)
+    end if
+    if (output) then
+        call vtkxml_write(tag_num, scal3D)
+        call vtkxml_write(tag_err, scal3D-good_scal)
+        call vtkxml_finish()
+    end if
+    call test_substatus ('time', (T+dt), cart_rank)
+    call test_check_success(scal3D, good_scal, success, cart_rank)
+    call test_substatus('3D test - with tag', success, cart_rank)
+
+    
+    ! --- Free memory --- 
+    deallocate(scal3D)
+    deallocate(good_scal)
+    deallocate(Vx)
+    deallocate(Vy)
+    deallocate(Vz)
+
+    success = .not.success
+
+    call mpi_finalize(ierr)
+
+end program advec_sheartag
diff --git a/HySoP/src/Unstable/LEGI/example/src/advec/turn_sphere.f90 b/HySoP/src/Unstable/LEGI/example/src/advec/turn_sphere.f90
new file mode 100644
index 0000000000000000000000000000000000000000..d87c3c9855c0c62ebfa695309db630a84c6f550d
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/example/src/advec/turn_sphere.f90
@@ -0,0 +1,186 @@
+!------------------------------------------------------------------------------
+!
+! PROGRAM : advec_turnsphere
+!
+! DESCRIPTION:
+!> This program provides a numerical illustration of an advection problem: a
+!! turning sphere.
+!!
+!! @details
+!! This example can be used as a benchmark (to measure optimisation
+!! efficiency or parallel scalability).
+!! Note that the analytic solution is known and can be checked.
+!!
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+program advec_turnsphere
+
+    ! External Library
+    use mpi
+    ! Scales code
+    use advec
+    use cart_topology
+    use vtkxml
+    ! Test procedures
+    use test_common
+
+    implicit none
+    
+    real(WP), parameter     :: M_PI = ACOS(-1.0)    ! Pi value
+    logical, parameter      :: output = .true.     ! Enable output for
+                                                    ! visualisation purposes?
+    real(WP),parameter      :: period = 1
+    integer, parameter      :: mesh_size = 160
+
+    logical     :: success = .true. ! test success flag
+    integer     :: ierr             ! mpi error code
+    integer     :: rank_world       ! process rank in "MPI_COMM_WORLD"
+    integer     :: nb_proc,nb_procZ ! number of processes
+
+    real(WP), dimension(:, :, :), allocatable   :: scal3D           ! the scalar field
+    real(WP), dimension(:, :, :), allocatable   :: Vx, Vy, Vz       ! the flow
+    integer                                     :: i,j,k            ! mesh indices
+    real(WP)                                    :: rx, ry, rz, rr   ! squared distances to the sphere centre
+    real(WP)                                    :: rayon            ! squared radius of the initial scalar sphere
+    real(WP)                                    :: T                ! time
+    real(WP)                                    :: T_end            ! final time
+    real(WP)                                    :: T_ite            ! time corresponding to output
+    real(WP)                                    :: dt               ! time step
+    real(WP), dimension(:, :, :), allocatable   :: good_scal        ! analytic solution
+    real(WP), dimension(:, :, :), allocatable   :: good_velo        ! temp field
+    integer                                     :: tag_rot          ! tag for visualisation context
+    integer                                     :: tag_sol          ! tag for visualisation context
+    real(WP)                                    :: times, time1, time2 ! to evaluate computation time.
+   
+    ! ===== Initialisation of parallel context =====
+
+    ! Set the verbosity
+    verbose_test = .true.
+    verbose_more = .true.
+    ! Initialise mpi
+    call mpi_init(ierr)    
+    call mpi_comm_rank(MPI_COMM_WORLD, rank_world, ierr)
+    call mpi_comm_size(MPI_COMM_WORLD, nb_proc, ierr)
+
+    ! Cut the domain along Y and initialize the topology
+    nb_procZ = 1
+    if ((mod(nb_proc,5)==0).and.(mod(mesh_size, nb_proc/5)==0)) then
+        nb_procZ = 5
+        nb_proc = nb_proc/5
+    else if ((mod(nb_proc,2)==0).and.(mod(mesh_size, nb_proc/2)==0)) then
+        nb_procZ = 2
+        nb_proc = nb_proc/2
+    else
+        if (mod(mesh_size, nb_proc)/=0) then
+            if(rank_world==0) write(*,'(a,1x,i0)') 'wrong number of processes: it must divide', mesh_size
+            stop
+        end if
+    end if
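+    ! For instance, with 20 MPI processes the first branch applies:
+    ! nb_procZ = 5 and nb_proc = 4 (and 160 is divisible by 4), giving a
+    ! 4x5 process grid in cart_create below.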
+
+    ! ===== Create mesh ====
+    call cart_create((/ nb_proc, nb_procZ /), ierr)
+    call set_group_size(5)
+    call discretisation_create(mesh_size,mesh_size,mesh_size,dble(1),dble(1),dble(1))
+    call test_substatus('group size', group_size(1,1), cart_rank)
+    call mpi_barrier(MPI_COMM_WORLD, ierr)
+
+    ! ===== Initialize the particle solver =====
+    call advec_init('p_O2')
+
+    ! ===== Field allocation =====
+    allocate(scal3D(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(good_scal(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(Vx(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(Vy(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(Vz(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(good_velo(N_proc(1), N_proc(2), N_proc(3)))
+
+    ! ===== Initialization ====
+    ! -- Scalar --
+    scal3D = 0.
+    rayon = (minval(N*d_sc)/10.0)**2
+    do k = 1, N_proc(3)
+        rz = (d_sc(3)*(k + coord(3)*N_proc(3) - 3.0*N(3)/5.0))**2
+        do j = 1, N_proc(2)
+            ry = (d_sc(2)*(j + coord(2)*N_proc(2)- 3.0*N(2)/5.0))**2
+            do i = 1, N_proc(1)
+                rx = (d_sc(1)*(i - 3.0*N(1)/5.0))**2
+                rr = rx + ry + rz
+                if (rr < rayon) scal3D(i,j,k) = (1 - rr/rayon)**1
+            end do
+        end do
+    end do
+    good_scal = scal3D
+    ! -- Velocity --
+    do k = 1, N_proc(3)
+        do j = 1, N_proc(2)
+            do i = 1, N_proc(1)
+                Vx(i,j,k)=(2*M_PI/period)*(length(2)/2.0-((j+coord(2)*N_proc(2))*d_sc(2)))
+                Vy(i,j,k)=(2*M_PI/period)*(((i+coord(1)*N_proc(1))*d_sc(1))-length(1)/2.0)
+            end do
+        end do
+    end do
+    Vz = 0.0
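+    ! This is a solid-body rotation about the domain centre with angular
+    ! velocity 2*pi/period; after two periods (T_end below) the sphere is
+    ! back at its initial position, which is why good_scal is simply the
+    ! initial scalar field.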
+
+
+
+    ! ===== Initialize output context ====
+    if (output) then
+        call vtkxml_init_all(2, nb_proc_dim, length, cart_rank, coord,'./adv_res/')
+        call vtkxml_init_field('turning', tag_rot)
+        call vtkxml_init_field('turn_sol', tag_sol)
+        call vtkxml_write(tag_rot, scal3D, 'turning')
+    end if
+
+
+    ! ===== Compute the numerical solution =====
+    dt = 0.02
+    T = 0
+    T_end = 2*period
+    T_ite = 0
+    times = 0
+    do while (T<=T_end - dt)
+        call cpu_time(time1)
+        call advec_step(dt, Vx, Vy, Vz, scal3D)
+        T = T + dt
+        call cpu_time(time2)
+        times=times+(time2-time1)
+        if (T>T_ite) then
+            T_ite = T_ite + T_end/10
+            call test_substatus ('time', T, cart_rank)
+            if (output) call vtkxml_write(tag_rot, scal3D, 'turning')
+        end if
+    end do
+    if (T<T_end) then
+        dt = T_end - T
+        call advec_step(dt, Vx, Vy, Vz, scal3D)
+    end if
+    if (output) then
+        call vtkxml_write(tag_rot, scal3D, 'turning')
+        call vtkxml_write(tag_sol, good_scal)
+        call vtkxml_finish()
+    end if
+
+    call test_check_success(scal3D, good_scal, success, cart_rank)
+    print*, 'times = ', times
+    call test_substatus('computational time', times, cart_rank)
+    call test_substatus('turning sphere', success, cart_rank)
+
+
+    ! --- Free memory ---
+    deallocate(scal3D)
+    deallocate(good_scal)
+    deallocate(Vx)
+    deallocate(Vy)
+    deallocate(Vz)
+
+    success = .not.success
+
+
+    call mpi_finalize(ierr)
+
+end program advec_turnsphere
diff --git a/HySoP/src/Unstable/LEGI/test/src/CMakeLists.txt b/HySoP/src/Unstable/LEGI/test/src/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f72a850e46326ee83100366167814046dba1cbe5
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/test/src/CMakeLists.txt
@@ -0,0 +1,53 @@
+set(EXECUTABLE_OUTPUT_PATH "${TEST_EXE_DIR}")
+include_directories(${CMAKE_Fortran_MODULE_DIRECTORY})
+
+# ===== Test the parallel topology and how it interacts with the "global" data layout (used in the spectral code) =====
+set(TEST_NAME Test_topo)
+    file(GLOB ${TEST_NAME}_FILES ${TEST_NAME}/*.f90)
+    if(${TEST_NAME}_FILES)
+        list(APPEND ${TEST_NAME}_SRC ${${TEST_NAME}_FILES})
+    endif()
+    list(APPEND ${TEST_NAME}_SRC "test_common.f90")
+    list(APPEND ${TEST_NAME}_SRC "${${EXE_NAME}_SRCDIRS}/cart_topology.f90")
+add_executable(${TEST_NAME} ${${TEST_NAME}_SRC})
+target_link_libraries(${TEST_NAME} ${LIBS})
+
+# ===== Test the parallel output =====
+set(TEST_NAME Test_io)
+    file(GLOB ${TEST_NAME}_FILES ${TEST_NAME}/*.f90)
+    if(${TEST_NAME}_FILES)
+        list(APPEND ${TEST_NAME}_SRC ${${TEST_NAME}_FILES})
+    endif()
+    list(APPEND ${TEST_NAME}_SRC "test_common.f90")
+    # Tested procedures
+    list(APPEND ${TEST_NAME}_SRC "${${EXE_NAME}_SRCDIRS}/cart_topology.f90")
+    list(APPEND ${TEST_NAME}_SRC "${${EXE_NAME}_SRCDIRS}/cart_mesh.f90")
+    file(GLOB ${TEST_NAME}_LIB  ${${EXE_NAME}_SRCDIRS}/output/*.f90)
+    if(${TEST_NAME}_LIB)
+        list(APPEND ${TEST_NAME}_SRC ${${TEST_NAME}_LIB})
+    endif()
+add_executable(${TEST_NAME} ${${TEST_NAME}_SRC})
+target_link_libraries(${TEST_NAME} ${LIBS})
+
+# ===== Test the advection and the particle solver =====
+set(TEST_NAME Test_advec)
+    # Test file
+    file(GLOB ${TEST_NAME}_FILES ${TEST_NAME}/*.f90)
+    if(${TEST_NAME}_FILES)
+         list(APPEND ${TEST_NAME}_SRC ${${TEST_NAME}_FILES})
+    endif()
+    list(APPEND ${TEST_NAME}_SRC "test_common.f90")
+    # General environment and utilities (mesh, io, ...)
+    list(APPEND ${TEST_NAME}_SRC "${${EXE_NAME}_SRCDIRS}/cart_topology.f90")
+    list(APPEND ${TEST_NAME}_SRC "${${EXE_NAME}_SRCDIRS}/cart_mesh.f90")
+    file(GLOB ${TEST_NAME}_LIB  ${${EXE_NAME}_SRCDIRS}/output/*.f90)
+    if(${TEST_NAME}_LIB)
+        list(APPEND ${TEST_NAME}_SRC ${${TEST_NAME}_LIB})
+    endif()
+    # Tested solver
+    file(GLOB ${TEST_NAME}_LIB  ${${EXE_NAME}_SRCDIRS}/particle/*.f90)
+    if(${TEST_NAME}_LIB)
+        list(APPEND ${TEST_NAME}_SRC ${${TEST_NAME}_LIB})
+    endif()
+add_executable(${TEST_NAME} ${${TEST_NAME}_SRC})
+target_link_libraries(${TEST_NAME} ${LIBS})
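+
+# To add a new test, follow the same pattern: set TEST_NAME, glob the test
+# sources from the directory of that name, append the tested modules, then
+# create the executable and link it against ${LIBS}.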
diff --git a/HySoP/src/Unstable/LEGI/test/src/Test_advec/advec_aux.f90 b/HySoP/src/Unstable/LEGI/test/src/Test_advec/advec_aux.f90
new file mode 100644
index 0000000000000000000000000000000000000000..b2a738caea7b3885ad270086d3e0d22212094ac5
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/test/src/Test_advec/advec_aux.f90
@@ -0,0 +1,775 @@
+!> @addtogroup part_test
+!! @{
+    
+!------------------------------------------------------------------------------
+!
+! MODULE: advec_aux
+!
+! DESCRIPTION:
+!> Validation tests for the advection method.
+!!
+!! @details
+!! This module provides different tests to validate the transport solver.
+!! All these tests are unit tests: each returns a logical flag telling
+!! whether the current code version passes it.
+!!
+!! That is, all these tests are logical functions; they return .false. if the
+!! result is the expected one and .true. otherwise (error = .not. success).
+!! All the "test_part_*" functions are devoted to validating the particle solver.
+!! The following tests are included:
+!!      A - Validate the particle method, step by step
+!!      1 -> Test the procedure "AC_obtain_senders" from advec_common
+!!      2 -> Validate the redistribution of the buffer during remeshing (XXX todo)
+!!      3 -> Validate the remeshing of untagged particles
+!!      4 -> Validate the remeshing of tagged particles (XXX todo)
+!!      B - Validate an advection solver (XXX todo)
+!!      1 -> advect a ball with a constant velocity
+!!      2 -> advect a ball with a rotating velocity field (the ball turns)
+!!
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advec_aux
+
+    use string
+    use precision
+    implicit none
+
+    real(WP), private   :: epsilon_error = 1e-4     ! Error tolerance
+
+
+    ! Public procedures
+
+    ! ===== Tests for the particle solver =====
+    ! Public functions
+    public              :: test_part_remesh_no_tag
+    public              :: test_part_advec_1D
+    public              :: test_part_advec_3D
+    public              :: test_advecY
+
+
+    ! ===== Generic test for an advection solver =====
+
+    ! Private procedure
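+
+    ! Typical use from a test driver (a sketch; the driver and the MPI setup
+    ! are not part of this module):
+    !     error = test_part_remesh_no_tag('constant', 'p_O2')
+    !     if (error) write(*,*) 'remeshing test FAILED'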
+
+contains
+
+!> Particle method: validation of the remeshing of untagged particles
+!!     @param[in]   init_scal   = optional parameter to initialise the scalar field to
+!!                                  a constant one or to a sphere shape
+!!     @param[in]   order_opt   = optional parameter to choose the remeshing formula
+!!     @return      error       = test error (= false if the code passes the test) (= .not. success)
+function test_part_remesh_no_tag (init_scal, order_opt) result(success)
+
+    ! Library
+    use mpi
+    ! Scales code
+    use advec
+    use advecY
+    use advec_common    ! Some procedures common to advection along all directions
+    use advec_variables ! contains info about solver parameters and others.
+    use cart_topology
+    ! Test procedures
+    use advec_aux_init
+    use test_common
+
+    logical                                     :: success
+    character(len=*), intent(in), optional      :: init_scal
+    character(len=*), intent(in), optional      :: order_opt        ! space order of the solver
+
+    character(len=17)                           :: initialisation   ! to choose how to initialise the scalar field
+    character(len=str_short)                    :: order            ! space order of the solver
+    real(WP), dimension(:, :, :), allocatable   :: scalar           ! the scalar field
+    real(WP), dimension(:,:,:), allocatable     :: p_pos_adim, p_SC ! the dimensionless particle positions and the scalar they advect
+    logical, dimension(:,:,:), allocatable      :: bl_type, bl_tag  ! type and tag of each particle block
+    real(WP)                                    :: velocity         ! constant velocity of the flow
+    integer                                     :: nb_proc          ! number of processes
+    integer                                     :: ierr             ! mpi error code
+    integer                                     :: i,j,k            ! mesh indices
+    integer, dimension(2)                       :: ind_group        ! index of the current group of lines
+    integer                                     :: T_step           ! time
+    integer                                     :: T_end            ! final time
+    real(WP), dimension(:,:,:), allocatable     :: good_scal        ! analytic solution
+    real(WP), dimension(3)                      :: translat         ! to compute analytic solution
+
+    ! Initialize the particle solver
+    order = 'p_O2'
+    if (present(order_opt)) order = trim(order_opt)
+    call advec_init(order)
+
+    allocate(scalar(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(good_scal(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(p_pos_adim(N_proc(2),1,1))
+    allocate(p_SC(N_proc(2),1,1))
+    allocate(bl_type(1+N_proc(2)/bl_size,1,1))
+    allocate(bl_tag((N_proc(2)/bl_size),1,1))
+
+    if(present(init_scal)) then
+        initialisation = init_scal
+    else 
+        initialisation = 'constant'
+    end if
+    success = .true.
+
+    !call cart_create((/ 1, nb_proc, 1 /), ierr)
+    !call mesh_default()
+
+    ! Choose a velocity
+    velocity = N(2)/11.*d_sc(2)
+    T_step = 1
+    T_end = 1
+    T_end = T_step
+    translat = 0
+    translat(2) = - velocity*T_step
+    translat = translat/d_sc
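+    ! The reference field is thus the initial scalar translated by
+    ! -velocity*T_step, expressed above in grid units; scal_init builds
+    ! good_scal from this translation vector.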
+
+    ! Initialise the scalar field
+    call scal_init(initialisation, scalar, good_scal, translat)
+    call test_substatus('initialisation', success, cart_rank)
+
+    ! Choose to test remeshing for centered/left and tagged/untagged particles
+    bl_type = .true.
+    bl_tag  = .false.
+
+    ! Advec it with a particular method
+    do k = begin_proc(3), end_proc(3)
+    ind_group(2) = 0
+    !k = begin_proc(3)
+        ind_group(1) = 0
+        ind_group(2) = ind_group(2) + 1
+        do i = begin_proc(1), end_proc(1)
+        !i = begin_proc(1)
+            ind_group(1) = ind_group(1) + 1
+            ! Initialise the particles
+            p_SC(:,1,1) = scalar(i,:,k)
+            !scalar(i,:,k)=0
+            do j = begin_proc(2), end_proc(2)
+                p_pos_adim(j,1,1) = j
+            end do
+!            do T_step = 1, 11
+                ! Advection
+                p_pos_adim = p_pos_adim + T_step*velocity/d_sc(2)
+                ! Remeshing
+                allocate(send_group_min(1,1))
+                allocate(send_group_max(1,1))
+                if (order == 'p_O4') then
+                    call Yremesh_O4(ind_group, (/1,1/), p_pos_adim, bl_type, bl_tag, i, k, scalar)
+                else if (order == 'p_M6') then
+                    call Yremesh_Mprime6(ind_group, (/1,1/), p_pos_adim, i, k, scalar)
+                else
+                    call Yremesh_O2_group(ind_group, (/1,1/), p_pos_adim, bl_type, bl_tag, i, k, scalar)
+                end if
+                deallocate(send_group_min)
+                deallocate(send_group_max)
+!            end do
+        end do
+    end do
+
+    ! Check the final scalar field
+    call test_check_success(scalar, good_scal, success, cart_rank)
+
+    deallocate(scalar)
+    deallocate(p_pos_adim)
+    deallocate(p_SC)
+    deallocate(bl_type)
+    deallocate(bl_tag)
+
+    success = .not.success
+
+end function test_part_remesh_no_tag
+
+
+!> Particle method: validation of the advection along each direction
+!! individually with the particle method - no tag case.
+!!     @param[in]   init_scal   =  optional parameter to initialise the scalar field to
+!!                                  a constant one or to a sphere shape
+!!     @param[in]   shift       = global translation of indices (optional)
+!!     @param[in]   order_opt   =  optional parameter to choose the remeshing formula
+!!     @return      error       = test error (= false if the code passes the test) (= not success)
+!! @details
+!!    These tests are devoted to validating the advection solver based on the particle
+!!    method. They can be used for other advection solvers too. Their specificity
+!!    is to test each configuration that could be encountered in the order 2 solver
+!!    based on the particle method. Therefore they test all cases without tag (and
+!!    therefore without corrected remeshing formula) for the order 2 (or order 4) solver.
+function test_part_advec_1D(init_scal, shift, order_opt) result(success)
+
+    ! Library
+    use mpi
+    ! Scales code
+    use advec
+    use advecX
+    use advecY
+    use advecZ
+    use cart_topology
+    ! Test procedures
+    use advec_aux_init
+    use test_common
+
+    logical                                     :: success
+    character(len=*), intent(in), optional      :: init_scal
+    integer, intent(in), optional               :: shift
+    character(len=*), intent(in), optional      :: order_opt        ! space order of the solver
+
+    character(str_short)                        :: initialisation   ! to choose how to initialise the fields
+    integer                                     :: shift_bis        ! shift effectively used in the test
+    character(str_short)                        :: order            ! space order of the solver
+    real(WP), dimension(:, :, :), allocatable   :: scal3D           ! the scalar field
+    real(WP), dimension(:, :, :), allocatable   :: velo             ! the flow
+    real(WP), dimension(:, :, :), allocatable   :: Vx, Vy, VZ       ! the flow
+    integer                                     :: i,j,k            ! mesh indices
+    real(WP)                                    :: T_step           ! time at which outputs are done
+    real(WP)                                    :: T_end            ! final time
+    real(WP)                                    :: T                ! current time
+    real(WP)                                    :: dt               ! time step
+    real(WP), dimension(:, :, :), allocatable   :: good_scal        ! analytic solution
+    integer                                     :: direction        ! current direction
+    integer                                     :: tag_io,tag_er    ! identifiers for I/O
+    integer                                     :: tag_sol          ! identifier for I/O
+
+    ! -- Mesh init --
+    call discretisation_create(80,80,80,dble(1),dble(1),dble(1))
+
+    ! -- Allocation --
+    allocate(scal3D(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(good_scal(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(velo(N_proc(1), N_proc(2), N_proc(3)))
+
+    ! -- Initialisation --
+    if(present(init_scal)) then
+        initialisation = init_scal
+    else 
+        initialisation = 'center'
+    end if
+    if (present(shift)) then
+        shift_bis = shift
+    else
+        shift_bis = 0
+    end if
+    success = .true.
+    dt = 0.1
+
+    call test_substatus('shift', shift_bis, cart_rank)
+
+    ! Initialize the particle solver
+    order = 'p_O2'
+    if (present(order_opt)) order = trim(order_opt)
+    call advec_init(order)
+
+    ! ===== Test along X =====
+    ! Initialise the velocity, the scalar field and compute the theoretical solution
+    direction = 1
+    call test_substatus('direction', direction, cart_rank)
+    call scal_velo_init_part(init_scal, dt, shift_bis, scal3D, velo, direction, good_scal)
+    call test_substatus('initialisation', success, cart_rank)
+
+    ! Advect it with the particle method
+    call advecX_calc(dt, velo, scal3D, order)
+    call test_check_success(scal3D, good_scal, success, cart_rank)
+    call test_substatus('advection along X', success, cart_rank)
+
+
+    ! ===== Test along Y =====
+    ! Initialise the velocity, the scalar field and compute the theoretical solution
+    direction = 2
+    call test_substatus('direction', direction, cart_rank)
+    call scal_velo_init_part(init_scal, dt, shift_bis, scal3D, velo, direction, good_scal)
+    call test_substatus('initialisation', success, cart_rank)
+
+    ! Advect it with the particle method
+    call advecY_calc(dt, velo, scal3D, order)
+    call test_check_success(scal3D, good_scal, success, cart_rank)
+    call test_substatus('advection along Y', success, cart_rank)
+
+
+    ! ===== Test along Z =====
+    ! Initialise the velocity, the scalar field and compute the theoretical solution
+    direction = 3
+    call test_substatus('direction', direction, cart_rank)
+    call scal_velo_init_part(init_scal, dt, shift_bis, scal3D, velo, direction, good_scal)
+    call test_substatus('initialisation', success, cart_rank)
+
+    ! Advect it with the particle method
+    call advecZ_calc(dt, velo, scal3D, order)
+    call test_check_success(scal3D, good_scal, success, cart_rank)
+    call test_substatus('advection along Z', success, cart_rank)
+
+
+
+    deallocate(scal3D)
+    deallocate(good_scal)
+    deallocate(velo)
+
+    success = .not.success
+
+end function test_part_advec_1D
+
+
+!> Particle method: validation of the advection in 3D cases.
+!!     @param[in]   order       =  optional parameter to choose between the order 2
+!!                                  and order 4 particle methods.
+!!     @param[in]   shift       = global translation of indices (optional)
+!!     @return      error       = test error (= false if the code passes the test) (= not success)
+!! @details
+!!    These tests are devoted to validating the advection solver based on the particle
+!!    method in 3D cases. They can be used for other advection solvers too. Their specificity
+!!    is to test each configuration that could be encountered in the order 2 and order 4
+!!    solvers based on the particle method. Supposing the 1D tests (test_part_advec_1D) have
+!!    been passed successfully, passing this test means the solver no longer contains
+!!    errors.
+function test_part_advec_3D(shift, order) result(success)
+
+    ! Library
+    use mpi
+    ! Scales code
+    use advec
+    use cart_topology
+    use vtkxml
+    ! Test procedures
+    use advec_aux_init
+    use test_common
+
+    logical                                     :: success
+    character(len=*), intent(in), optional      :: order
+    integer, intent(in), optional               :: shift
+
+    character(str_short)                        :: initialisation   ! to choose how to initialise the fields
+    integer                                     :: shift_bis        ! shift effectively used in the test
+    character(str_short)                        :: order_bis        ! space order of the solver
+    real(WP), dimension(:, :, :), allocatable   :: scal3D           ! the scalar field
+    real(WP), dimension(:, :, :), allocatable   :: velo             ! the flow
+    real(WP), dimension(:, :, :), allocatable   :: Vx, Vy, VZ       ! the flow
+    integer                                     :: i,j,k            ! mesh indices
+    real(WP)                                    :: T_step           ! time at which outputs are done
+    real(WP)                                    :: T_end            ! final time
+    real(WP)                                    :: T                ! current time
+    real(WP)                                    :: dt               ! time step
+    real(WP), dimension(:, :, :), allocatable   :: good_scal        ! analytic solution
+    integer                                     :: direction        ! current direction
+    integer                                     :: tag_num,tag_err  ! identifiers for I/O
+    integer                                     :: tag_sol          ! identifier for I/O
+    real(WP)                                    :: rx, ry, rz, rr
+
+
+    ! -- Parameters initialisation --
+    call discretisation_create(80,80,80,dble(1),dble(1),dble(1))
+    call set_group_size(10)
+    if(present(order)) then
+        order_bis = order
+    else 
+        order_bis = 'p_O2'
+    end if
+    ! Initialize the particle solver
+    call advec_init(order_bis)
+    if (present(shift)) then
+        shift_bis = shift
+    else
+        shift_bis = 0
+    end if
+    call test_substatus('shift', shift_bis, cart_rank)
+    success = .true.
+    T_end = 1.0
+
+    ! -- Allocation --
+    allocate(scal3D(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(good_scal(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(velo(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(Vx(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(Vy(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(Vz(N_proc(1), N_proc(2), N_proc(3)))
+
+    ! Initialize output context
+    call vtkxml_init_all(9, nb_proc_dim, length, cart_rank, coord,'./adv_res/')
+
+    ! -- Fields initialisation --
+    ! ===== 3D velocity =====
+    ! Initialise the velocity, the scalar field and compute the theoretical solution
+    dt = 1
+    call scal_velo_init_part('center', dt, shift_bis, scal3D, velo, 1, good_scal)
+    dt = 0.1
+    good_scal = scal3D
+    velo = 1
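+    ! The reference solution is the initial field itself: with a unit velocity
+    ! on this periodic domain of unit length, the scalar comes back to its
+    ! starting position after T_end = 1 (the init above is called with dt = 1
+    ! only to obtain a consistent field, then dt is reset to 0.1).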
+    call test_substatus('Start test : X,Y,Z,3D,tag', cart_rank)
+    
+
+    ! === Velocity along X ===
+    T = 0.0
+    T_step = 0.0
+    Vx = velo
+    Vy = 0
+    Vz = 0
+    call vtkxml_init_field(trim(order_bis)//'_X_num', tag_num)
+    call vtkxml_init_field(trim(order_bis)//'_X_err', tag_err)
+    call vtkxml_init_field(trim(order_bis)//'_X_sol', tag_sol)
+    call vtkxml_write(tag_sol, good_scal)
+    do while(T< (T_end - dt))
+        call advec_step(dt, Vx, Vy, Vz, scal3D)
+        T = T + dt
+        if (T>T_step) then
+            T_step = T_step + period/10
+            call test_substatus ('X, t', T, cart_rank)
+            call vtkxml_write(tag_num, scal3D)
+        end if
+    end do
+    if (T<T_end) then
+        dt = T_end - T
+        call advec_step(dt, Vx, Vy, Vz, scal3D)
+    end if
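+    ! The last, smaller step closes the gap between T and T_end exactly, so
+    ! that the numerical field is compared to the reference at the same time.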
+    call vtkxml_write(tag_num, scal3D)
+    call vtkxml_write(tag_err, scal3D-good_scal)
+    call test_substatus ('X, t', (T+dt), cart_rank)
+    call test_check_success(scal3D, good_scal, success, cart_rank)
+    call test_substatus('3D test - V along X', success, cart_rank)
+
+    ! === Velocity along Y ===
+    T = 0.0
+    T_step = 0.0
+    Vx = 0
+    Vy = velo
+    Vz = 0
+    call vtkxml_init_field(trim(order_bis)//'_Y_num', tag_num)
+    call vtkxml_init_field(trim(order_bis)//'_Y_err', tag_err)
+    scal3D = good_scal
+    do while(T< (T_end - dt))
+        call advec_step(dt, Vx, Vy, Vz, scal3D)
+        T = T + dt
+        if (T>T_step) then
+            T_step = T_step + period/10
+            call test_substatus ('Y, t', T, cart_rank)
+            call vtkxml_write(tag_num, scal3D)
+        end if
+    end do
+    if (T<T_end) then
+        dt = T_end - T
+        call advec_step(dt, Vx, Vy, Vz, scal3D)
+    end if
+    call vtkxml_write(tag_num, scal3D)
+    call vtkxml_write(tag_err, scal3D-good_scal)
+    call test_check_success(scal3D, good_scal, success, cart_rank)
+    call test_substatus('3D test - V along Y', success, cart_rank)
+
+    ! === Velocity along Z ===
+    T = 0.0
+    T_step = 0.0
+    Vx = 0
+    Vy = 0
+    Vz = velo
+    dt = 0.1
+    T_end = 1
+    scal3D = good_scal
+    call vtkxml_init_field(trim(order_bis)//'_Z_num', tag_num)
+    call vtkxml_init_field(trim(order_bis)//'_Z_err', tag_err)
+    do while(T< (T_end - dt))
+        call advec_step(dt, Vx, Vy, Vz, scal3D)
+        T = T + dt
+        if (T>T_step) then
+            T_step = T_step + period/10
+            call test_substatus ('Z, t', T, cart_rank)
+            call vtkxml_write(tag_num, scal3D)
+        end if
+    end do
+    if (T<T_end) then
+        dt = T_end - T
+        call advec_step(dt, Vx, Vy, Vz, scal3D)
+    end if
+    call vtkxml_write(tag_num, scal3D)
+    call vtkxml_write(tag_err, scal3D-good_scal)
+    call test_check_success(scal3D, good_scal, success, cart_rank)
+    call test_substatus('3D test - V along Z', success, cart_rank)
+
+    ! === 3D Velocity  ===
+    call test_substatus('3D test - 3D velocity', cart_rank)
+    T = 0.0
+    T_step = 0.0
+    Vx = 2*velo
+    Vy = velo
+    Vz = 3*velo
+    call vtkxml_init_field(trim(order_bis)//'_3Dnum', tag_num)
+    call vtkxml_init_field(trim(order_bis)//'_3Derr', tag_err)
+    scal3D = good_scal
+    do while(T< (T_end - dt))
+        call advec_step(dt, Vx, Vy, Vz, scal3D)
+        T = T + dt
+        if (T>T_step) then
+            T_step = T_step + period/10
+            call test_substatus ('3D, t', T, cart_rank)
+            call vtkxml_write(tag_num, scal3D)
+        end if
+    end do
+    if (T<T_end) then
+        dt = T_end - T
+        call advec_step(dt, Vx, Vy, Vz, scal3D)
+    end if
+    call vtkxml_write(tag_num, scal3D)
+    call vtkxml_write(tag_err, scal3D-good_scal)
+    call test_check_success(scal3D, good_scal, success, cart_rank)
+    call test_substatus('3D test - 3D velo', success, cart_rank)
+
+    ! --- Free memory ---
+    call vtkxml_finish()
+    deallocate(Vx)
+    deallocate(Vy)
+    deallocate(Vz)
+    deallocate(scal3D)
+    deallocate(good_scal)
+    deallocate(velo)
+
+    ! === Case with corrected remeshing formula ===
+    call discretisation_create(400,400,16,dble(2),dble(2),dble(0.5))
+    call set_group_size(40)
+    call advec_init(order_bis)
+    ! -- Allocation --
+    allocate(scal3D(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(good_scal(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(Vx(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(Vy(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(Vz(N_proc(1), N_proc(2), N_proc(3)))
+    ! Initialize output context
+    call vtkxml_init_all(9, nb_proc_dim, length, cart_rank, coord,'./adv_res/')
+    call vtkxml_init_field(trim(order_bis)//'_tag_num', tag_num)
+    call vtkxml_init_field(trim(order_bis)//'_tag_err', tag_err)
+    call vtkxml_init_field(trim(order_bis)//'_tag_sol', tag_sol)
+    do k = 1, N_proc(3)
+        do j = 1, N_proc(2)
+            ry = ((j-1+coord(2)*N_proc(2))*d_sc(2)) - length(2)/2.0
+            do i = 1, N_proc(1)
+                rx = ((i-1+coord(1)*N_proc(1))*d_sc(1))-length(1)/2.0
+                rr = (rx**2+ry**2)
+                if (rr<1) then
+                    scal3D(i,j,k) = (1-rr)**6
+                else
+                    scal3D(i,j,k) = 0
+                end if
+                Vx(i,j,k) = cos(3*M_PI*sqrt(rr)/2)*(-ry)
+                Vy(i,j,k) = cos(3*M_PI*sqrt(rr)/2)*(rx)
+            end do
+        end do
+    end do
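+    ! The scalar is a smooth compactly-supported bump (1-r**2)**6 inside the
+    ! unit circle, and the velocity is a rotation whose angular speed
+    ! cos(3*pi*r/2) changes sign with the radius: this shear creates the large
+    ! velocity variations that trigger the tagged/corrected remeshing formulas.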
+    good_scal = scal3D
+    call vtkxml_write(tag_sol, good_scal)
+    Vz = 0.0
+    T = 0.0
+    dt = 3*min(d_sc(1),d_sc(2))
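+    ! A time step of 3*min(dx,dy) corresponds to a nominal CFL number of 3
+    ! (see the status message below), well above the classical limit of 1:
+    ! particles of neighbouring blocks may cross, which is exactly the regime
+    ! where blocks get tagged and corrected remeshing is required.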
+    T_step = 0.0
+    T_end = 0.8
+    call test_substatus('cfl=3 and dt', dt, cart_rank)
+    do while(T< (T_end - dt))
+        call advec_step(dt, Vx, Vy, Vz, scal3D)
+        T = T + dt
+        if (T>T_step) then
+            T_step = T_step + period/10
+            call test_substatus ('tag, t', T, cart_rank)
+            call vtkxml_write(tag_num, scal3D)
+        end if
+    end do
+    if (T<T_end) then
+        dt = T_end - T
+        call advec_step(dt, Vx, Vy, Vz, scal3D)
+    end if
+    call vtkxml_write(tag_num, scal3D)
+    call vtkxml_write(tag_err, scal3D-good_scal)
+    call test_substatus ('time', (T+dt), cart_rank)
+    call test_check_success(scal3D, good_scal, success, cart_rank)
+    call test_substatus('3D test - with tag', success, cart_rank)
+
+
+    call discretisation_default()
+    call advec_init(order_bis)
+    
+    ! --- Free memory --- 
+    call vtkxml_finish()
+
+    success = .not.success
+
+end function test_part_advec_3D
+
+
+
+!> Test devoted to validating the advection solver along one direction
+!!     @param[in]   init_scal   =  optional parameter to initialise the scalar field to
+!!                                  a constant one or to a sphere shape
+!!     @return      error       = test error (= false if the code passes the test) (= not success)
+function test_advecY(init_scal) result(success)
+
+    ! Library
+    use mpi
+    ! Scales code
+    use advec
+    use advecY
+    use cart_topology
+    ! Test procedures
+    use advec_aux_init
+    use test_common
+
+    logical                                     :: success
+    character(len=*), intent(in), optional      :: init_scal
+
+    character(len=17)                           :: initialisation   ! to choose how to initialise the scalar field
+    character(len=str_short)                    :: order            ! space order of the solver
+    real(WP), dimension(:, :, :), allocatable   :: scal3D           ! the scalar field
+    real(WP), dimension(:, :, :), allocatable   :: Vy               ! the flow
+    real(WP)                                    :: velocity         ! constant velocity of the flow
+    integer                                     :: ierr             ! mpi error code
+    integer                                     :: i,j,k            ! mesh indices
+    integer                                     :: T_step           ! time
+    integer                                     :: T_end            ! final time
+    real(WP)                                    :: dt               ! time step
+    real(WP), dimension(:, :, :), allocatable   :: good_scal        ! analytic solution
+    real(WP), dimension(3)                      :: translat         ! to compute analytic solution
+
+    allocate(scal3D(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(good_scal(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(Vy(N_proc(1), N_proc(2), N_proc(3)))
+
+    if(present(init_scal)) then
+        initialisation = init_scal
+    else 
+        initialisation = 'constant'
+    end if
+    success = .true.
+
+
+    ! Initialize the particle solver
+    order = 'p_O2'
+    call advec_init(order)
+
+
+    ! Initialise the velocity (note the real literal 11. to avoid integer division)
+    velocity = N(2)/11.*d_sc(2)
+    Vy = velocity
+    T_end = 1
+    dt = 0.1
+    translat = 0
+    translat(2) = - velocity*T_end
+    translat = translat/d_sc
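+    ! As in the remeshing test, the reference solution is the initial field
+    ! translated by -velocity*T_end, expressed in mesh units.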
+
+    ! Initialise the scalar field
+    call scal_init(initialisation, scal3D, good_scal, translat)
+    call test_substatus('initialisation', success, cart_rank)
+
+    ! Advect it with the particle method, in a single step of dt = T_end
+    dt = 1
+    call advecY_calc(dt, Vy, scal3D, 'p_O2')
+    call test_check_success(scal3D, good_scal, success, cart_rank)
+    call test_substatus('advec along Y', success, cart_rank)
+
+
+    ! --- Free memory ---
+    deallocate(scal3D)
+    deallocate(good_scal)
+    deallocate(Vy)
+
+    success = .not.success
+
+end function test_advecY
+
+
+!> Test devoted to validating the 3D advection solver
+!!     @return      error       = test error (= false if the code passes the test) (= not success)
+function test_advec_rot() result(success)
+
+    ! Library
+    use mpi
+    ! Scales code
+    use advec
+    use cart_topology
+    use vtkxml
+    ! Test procedures
+    use advec_aux_init
+    use test_common
+
+    logical                                     :: success
+
+    character(len=17)                           :: init             ! to choose how to initialise the scalar field
+    character(len=str_short)                    :: order            ! space order of the solver
+    real(WP), dimension(:, :, :), allocatable   :: scal3D           ! the scalar field
+    real(WP), dimension(:, :, :), allocatable   :: VX, Vy, Vz       ! the flow
+    real(WP)                                    :: velocity         ! constant velocity of the flow
+    integer                                     :: ierr             ! mpi error code
+    integer                                     :: i,j,k            ! mesh indices
+    real(WP)                                    :: T                ! time
+    real(WP)                                    :: T_end            ! final time
+    real(WP)                                    :: T_ite            ! time corresponding to output
+    real(WP)                                    :: dt               ! time step
+    real(WP), dimension(:, :, :), allocatable   :: good_scal        ! analytic solution
+    real(WP), dimension(:, :, :), allocatable   :: good_velo        ! temp field
+    real(WP), dimension(:), allocatable         :: pos_adim         ! temp field
+    real(WP), dimension(3)                      :: translat         ! to compute analytic solution
+    integer                                     :: tag_rot          ! tag for visualisation context
+    integer                                     :: tag_sol          ! tag for visualisation context
+
+
+    allocate(scal3D(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(good_scal(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(Vx(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(Vy(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(Vz(N_proc(1), N_proc(2), N_proc(3)))
+    allocate(good_velo(N_proc(1), N_proc(2), N_proc(3)))
+
+    init = '2D_rot'
+    success = .true.
+    ! Initialize output context
+    call vtkxml_init_all(2, nb_proc_dim, length, cart_rank, coord,'./adv_res/')
+
+
+    ! Initialize the particle solver
+    order = 'p_O2'
+    call advec_init(order)
+    call vtkxml_init_field('turning', tag_rot)
+    call vtkxml_init_field('turn_sol', tag_sol)
+
+
+    ! Initialise the velocity
+    call scal_init(init, scal3D, good_scal)
+    period = 1
+    dt = 0.02
+    allocate(pos_adim(N_proc(1)))
+    pos_adim = 1
+    call velo_init(init, Vx, 1, pos_adim, good_velo)
+    call velo_init(init, Vy, 2, pos_adim, good_velo)
+    call velo_init(init, Vz, 3, pos_adim, good_velo)
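+    ! velo_init('2D_rot', ...) is expected to build a rotating velocity field
+    ! of period `period`; after T_end = 4*period the sphere has made four full
+    ! turns and must coincide with the reference field good_scal.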
+    call vtkxml_write(tag_rot, scal3D, 'turning')
+    call test_substatus('initialisation', success, cart_rank)
+
+    ! Advect it with the particle method
+    T = 0
+    T_end = 4*period
+    T_ite = 0
+    do while (T<=T_end - dt)
+        call advec_step(dt, Vx, Vy, Vz, scal3D)
+        T = T + dt
+        if (T>T_ite) then
+            T_ite = T_ite + T_end/30
+            call test_substatus ('time', T, cart_rank)
+            call vtkxml_write(tag_rot, scal3D, 'turning')
+        end if
+    end do
+    if (T<T_end) then
+        dt = T_end - T
+        call advec_step(dt, Vx, Vy, Vz, scal3D)
+    end if
+    call vtkxml_write(tag_rot, scal3D, 'turning')
+    call vtkxml_write(tag_sol, good_scal)
+
+    call test_check_success(scal3D, good_scal, success, cart_rank)
+    call test_substatus('turning sphere', success, cart_rank)
+
+
+    ! --- Free memory ---
+    call vtkxml_finish()
+    deallocate(scal3D)
+    deallocate(good_scal)
+    deallocate(Vx)
+    deallocate(Vy)
+    deallocate(Vz)
+
+    success = .not.success
+
+end function test_advec_rot
+
+end module advec_aux
+
+!> @}
diff --git a/HySoP/src/Unstable/LEGI/test/src/Test_advec/advec_aux_common.f90 b/HySoP/src/Unstable/LEGI/test/src/Test_advec/advec_aux_common.f90
new file mode 100644
index 0000000000000000000000000000000000000000..e4c09026a4b47a610e399482ae9dd081498a5ace
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/test/src/Test_advec/advec_aux_common.f90
@@ -0,0 +1,892 @@
+!> @addtogroup part_test
+!! @{
+    
+!------------------------------------------------------------------------------
+!
+! MODULE: test_part_common
+!
+! DESCRIPTION: 
+!> This module provides tests to validate the particle solver. It is
+!! more precisely focused on testing the "common part" (advec_common) used for each
+!! direction.
+!!
+!! @author
+!! Jean-Baptiste Lagaert, LEGI
+!!
+!! @details 
+!!  This module is devoted to validating all the procedures from "advec_common". All the
+!! tests are unit tests: they return a logical value to check whether the code version
+!! passes them or not.
+!!
+!! That is, all these tests are logical functions: they return true if the result
+!! is the right one and false otherwise.
+!! All the "test_part_*" functions are devoted to validating the particle
+!! solver.
+!! This module provides the following tests:
+!!    - Validate the particle method, step by step:
+!!        - Test the procedure "AC_obtain_senders" from advec_common
+!!        - Validate the redistribution of the buffer during the remeshing
+!!        - Validate the redistribution of the buffer during the remeshing -
+!!            debug version: only one process contains a non-zero field.
+!!        - Validate the remeshing of untagged particles
+!!        - Validate the velocity interpolation (the RK2 scheme uses the
+!!            velocity at the middle point to advect particles).
+!!        - Validate how the type (left or center) and tag are computed for a
+!!            single line
+!!        - Validate how the type (left or center) and tag are computed for a
+!!            group of lines
+!!
+!
+!------------------------------------------------------------------------------
+
+module advec_aux_common
+
+    use string
+    use precision
+    implicit none
+
+
+
+    ! ===== Test for the particles solver =====
+    ! Public function
+    public              :: test_part_AC_velo_determine_com
+    !public              :: test_part_AC_bufferToScalar
+    !public              :: test_part_AC_bufferToScalar_Deb
+    !public              :: test_part_AC_interpol_velocity
+    public              :: test_part_AC_type_and_block_O2_group
+
+
+
+
+contains
+
+
+!!> Particle method: validation of the procedure "AC_bufferToScalar".
+!!!     @return      success = test success (= false if the code passes the test)
+!!!     @param[in]   shift   = global translation of indices (optional)
+!!! @details
+!!! Test the procedure "AC_obtain_senders" which sends the local buffer used for remeshing
+!!! to the right processes. This procedure belongs to "advec_common".
+!function test_part_AC_bufferToScalar(shift) result(success)
+!
+!    ! Library
+!    use mpi
+!    ! Scale code
+!    use advec
+!    use advec_common    ! Some procedures common to advection along all directions
+!    use advec_variables ! contains info about solver parameters and others.
+!    use cart_topology
+!    ! Test procedures
+!    use test_common
+!
+!    integer, intent(in), optional       :: shift
+!    logical                             :: success
+!
+!    integer                             :: shift_bis    ! global translation of indices
+!    character(str_short)                :: order        ! order of the particles solver
+!    integer                             :: j_min, j_max ! input argument of the tested procedure
+!    real(WP), dimension(:), allocatable :: send_buffer  ! the buffer to redistribute
+!    real(WP), dimension(:), allocatable :: scal1D       ! the scalar field
+!    integer                             :: direction    ! direction (2 if along Y, 3=along Z)
+!    real(WP)                            :: good_scal    ! theoretical value of scal1D
+!    integer, dimension(2)               :: ind_group    ! index of the current group of lines
+!    integer, dimension(2)               :: rece_proc    ! minimal and maximal gap between my coordinate and the one from which 
+!                                                        ! I will receive data
+!    integer                             :: proc_min     ! smallest gap between me and the processes to which I send data
+!    integer                             :: proc_max     ! largest gap between me and the processes to which I send data
+!
+!    ! Some initialisation
+!    success = .true.
+!    ind_group = 1
+!    if (present(shift)) then
+!        shift_bis = shift
+!    else
+!        shift_bis = 0
+!    end if
+!    call test_substatus('shift', shift_bis, cart_rank)
+!    
+!    ! Initialize the particle solver
+!    order = 'p_O2'
+!    call advec_init(order, verbosity=.false.)
+!
+!    do direction = 1, 3
+!        if (nb_proc_dim(direction)>1) then 
+!            call test_substatus('direction', direction, cart_rank)
+!            good_scal = modulo(coord(direction)-shift_bis, nb_proc_dim(direction))
+!            
+!            ! Test 1 - unrealistic case with no communication
+!            ! Initialize the buffer to remesh
+!            call AC_set_part_bound_size(0)
+!            j_min = 1 + shift_bis*N_proc(direction)
+!            j_max = N_proc(direction)*(shift_bis+1)
+!            allocate (send_buffer(j_min:j_max))
+!            allocate (scal1D(1:N_proc(direction)))
+!            send_buffer = coord(direction)
+!            scal1D = 0.0
+!            ! Let's go !
+!            ! Determine the communication needed: who will communicate with whom?
+!            call AC_obtain_senders_line(j_min, j_max, direction, ind_group, proc_min, proc_max, rece_proc)
+!            ! And then distribute the buffer among the processes
+!            call AC_bufferToScalar(direction, ind_group, j_min, j_max, proc_min, proc_max, rece_proc, send_buffer, scal1D)
+!            ! Check the success
+!            call test_check_success(scal1D, good_scal, success, cart_rank)
+!            deallocate (send_buffer)
+!            deallocate (scal1D)
+!            call test_substatus('just me', success, cart_rank)
+!
+!            ! I communicate with my two neighbors
+!            call advec_init(order, verbosity=.false.)
+!            j_min = shift_bis*N_proc(direction)
+!            j_max = (1+shift_bis)*N_proc(direction) -1+2*bl_bound_size
+!            allocate (send_buffer(j_min:j_max))
+!            allocate (scal1D(1:N_proc(direction)))
+!            send_buffer = coord(direction)
+!            send_buffer(j_min) = modulo(coord(direction)-1, nb_proc_dim(direction))/2.0
+!            send_buffer(j_min+1) = send_buffer(j_min+1)/2.
+!            send_buffer(j_max) = modulo(coord(direction)+1, nb_proc_dim(direction))/2.0
+!            send_buffer(j_max-1) = send_buffer(j_max-1)/2.
+!            scal1D = 0.0
+!            ! Determine the communication needed: who will communicate with whom?
+!            call AC_obtain_senders_line(j_min, j_max, direction, ind_group, proc_min, proc_max, rece_proc)
+!            ! And then distribute the buffer among the processes
+!            call AC_bufferToScalar(direction, ind_group, j_min, j_max, proc_min, proc_max, rece_proc, send_buffer, scal1D)
+!            ! Check the success
+!            call test_check_success(scal1D, good_scal, success, cart_rank)
+!            deallocate (send_buffer)
+!            deallocate (scal1D)
+!            call test_substatus('me and my neighbors', success, cart_rank)
+!        else 
+!            call test_substatus('only one proc along direction, no test', cart_rank)
+!        end if
+!    end do
+!
+!    success = .not.success
+!
+!end function test_part_AC_bufferToScalar
+!
+!
+!
+!!> Particle method: validation of the procedure "AC_bufferToScalar"
+!!!     @return      success = test success (= false if the code passes the test)
+!!!     @param[in]   shift   = global translation of indices (optional)
+!!!     @param[in]   rank    = rank of the process which will contain the non-zero field.
+!!! @details
+!!!    Debugging version: only one process contains a non-zero field to remesh.
+!!!    Each process communicates with its two neighbors. A shift can be added.
+!function test_part_AC_bufferToScalar_Deb(rank, shift) result(success)
+!
+!    ! Library
+!    use mpi
+!    ! Scale code
+!    use advec
+!    use advec_common    ! Some procedures common to advection along all directions
+!    use advec_variables ! contains info about solver parameters and others.
+!    use cart_topology
+!    ! Test procedures
+!    use test_common
+!
+!    integer, intent(in)                 :: rank
+!    integer, intent(in), optional       :: shift
+!    logical                             :: success
+!
+!    integer                             :: ierr         ! mpi error code
+!    integer                             :: shift_bis    ! global translation of indices
+!    character(str_short)                :: order        ! order of the particles solver
+!    integer                             :: j_min, j_max ! input argument of the tested procedure
+!    real(WP), dimension(:), allocatable :: send_buffer  ! the buffer to redistribute
+!    real(WP), dimension(:), allocatable :: scal1D       ! the scalar field
+!    real(WP), dimension(:), allocatable :: good_scal    ! theoretical value of scal1D
+!    integer                             :: direction    ! direction (2 if along Y, 3=along Z)
+!    integer, dimension(3)               :: mycoord      ! my coordinates in the mpi topology
+!    integer                             :: rank_shift   ! rank of the process located at rank+shift
+!    integer, dimension(2)               :: ind_group    ! index of the current group of lines
+!    integer, dimension(2)               :: rece_proc    ! minimal and maximal gap between my coordinate and the one from which 
+!                                                        ! I will receive data
+!    integer                             :: proc_min     ! smallest gap between me and the processes to which I send data
+!    integer                             :: proc_max     ! largest gap between me and the processes to which I send data
+!
+!    ! Some initialisation
+!    success = .true.
+!    ind_group = 1
+!    if (present(shift)) then
+!        shift_bis = shift
+!    else
+!        shift_bis = 0
+!    end if
+!    call test_substatus('shift', shift_bis, cart_rank)
+!    call test_substatus('rank tested', rank, cart_rank)
+!
+!    ! Initialize the particle solver
+!    order = 'p_O2'
+!    call advec_init(order, verbosity=.false.)
+!    
+!    do direction = 1, 3
+!        call test_substatus('direction', direction, cart_rank)
+!        if (nb_proc_dim(direction)>1) then 
+!            ! Initialise the solver environment and the field
+!            call advec_init(order, verbosity=.false.)
+!            j_min = shift_bis*N_proc(direction)
+!            j_max = (1+shift_bis)*N_proc(direction) -1+2*bl_bound_size
+!            allocate (send_buffer(j_min:j_max))
+!            allocate (scal1D(1:N_proc(direction)))
+!            send_buffer = 0
+!            if (cart_rank == rank) then
+!                send_buffer = 2
+!                send_buffer(j_min) = 1
+!                send_buffer(j_max) = 3
+!            end if
+!            scal1D = 0.0
+!
+!            ! -- Compute the analytic solution --
+!            allocate (good_scal(1:N_proc(direction)))
+!            good_scal = 0.0
+!            ! For the process which corresponds to me after the shift
+!            call mpi_cart_coords(cart_comm, rank, 3, mycoord, ierr)
+!            mycoord(direction) = mycoord(direction) + shift_bis
+!            call mpi_cart_rank(cart_comm, mycoord, rank_shift, ierr)
+!            if (cart_rank==rank_shift)   good_scal = 2
+!            ! For the next process in the current direction
+!            mycoord(direction) = mycoord(direction) + 1
+!            call mpi_cart_rank(cart_comm, mycoord, rank_shift, ierr)
+!            if (cart_rank==rank_shift)   good_scal(1) = 3
+!            ! For the previous process in the current direction
+!            mycoord(direction) = mycoord(direction) -2
+!            call mpi_cart_rank(cart_comm, mycoord, rank_shift, ierr)
+!            if (cart_rank==rank_shift)   good_scal(N_proc(direction)) = 1
+!
+!            ! -- Compute the numerical solution --
+!            ! Determine the communication needed: who will communicate with whom?
+!            call AC_obtain_senders_line(j_min, j_max, direction, ind_group, proc_min, proc_max, rece_proc)
+!            ! And then distribute the buffer among the processes
+!            call AC_bufferToScalar(direction, ind_group, j_min, j_max, proc_min, proc_max, rece_proc, send_buffer, scal1D)
+!            ! Check the success
+!            call test_check_success(scal1D, good_scal, success, cart_rank)
+!            deallocate (send_buffer)
+!            deallocate (scal1D)
+!            deallocate (good_scal)
+!            call test_substatus('me and my neighbors', success, cart_rank)
+!        else 
+!            call test_substatus('only one proc along direction, no test', cart_rank)
+!        end if
+!    end do
+!
+!    success = .not.success
+!
+!
+!end function test_part_AC_bufferToScalar_Deb
+
+
+!> Particle method: validation of the procedure "AC_velocity_determine_communication", which determines
+!! who will communicate with whom during the velocity interpolation
+!!     @return      success = test success (= false if the code passes the test)
+!!     @param[in]   shift   = global translation of indices (optional)
+function test_part_AC_velo_determine_com(shift) result(success)
+
+    ! Library
+    use mpi
+    ! Scale code
+    use advec
+    use advec_common    ! Some procedures common to advection along all directions
+    use advec_variables ! contains info about solver parameters and others.
+    use cart_topology
+    ! Test procedures
+    use test_common
+
+    integer, intent(in), optional               :: shift
+    logical                                     :: success
+    ! Be aware: inside this function, success = true if everything is right, but we return .not.success (i.e. the error)
+
+    integer                                     :: shift_bis    ! global translation of indices
+    character(str_short)                        :: order        ! order of the particles solver
+    integer                                     :: ierr         ! mpi error code
+    integer , dimension(5,5)                    :: rece_ind_min ! minimal mesh index involved in remeshing particles (of my local subdomain)
+    integer , dimension(5,5)                    :: rece_ind_max ! maximal mesh index involved in remeshing particles (of my local subdomain)
+    integer, dimension(5,5,2)                   :: rece_gap     ! distance between me and the processes which send me information
+    integer, dimension(2 , 2)                   :: send_gap     ! distance between me and the processes to which I send information
+    integer, dimension(2)                       :: rece_gap_abs ! min (resp. max) value of rece_gap(:,:,i) with i=1 (resp. 2)
+    integer, dimension(:), allocatable          :: rece_rank    ! rank of the processes from which I receive data
+    integer, dimension(2 , 2)                   :: send_theoric ! theoretical value of send_gap
+    integer, dimension(2)                       :: ind_group    ! index of the current group of lines
+    integer                                     :: direction    ! current direction (along X, Y or Z)
+    integer, dimension(:,:), allocatable        :: cartography  ! cartography(:,proc_gap) contains the set of line indices in the block for which the
+                                                                ! current process requires data from proc_gap and, for each of these lines, the range
+                                                                ! of mesh points from which it requires the velocity values.
+    integer, dimension(:,:), allocatable        :: carto_th     ! theoretical value of cartography
+    integer                                     :: max_size
+    integer                                     :: proc_gap     ! gap between two different MPI processes
+    integer, dimension(2)                       :: gs           ! group size
+
+
+    ! Some initialisation
+    success = .true.
+    ind_group = 1
+    gs = (/5,5/)
+    direction = 3
+    if (present(shift)) then
+        shift_bis = shift
+    else
+        shift_bis = 0
+    end if
+    call test_substatus('shift', shift_bis, cart_rank)
+
+    ! Initialize the particle solver
+    order = 'p_O2'
+    call advec_init(order, verbosity=.false.)
+    call test_substatus('solver initialisation', success, cart_rank)
+
+    ! ===== Each line of the group is initialised according to its second index =====
+    ! -- The first line communicates just with me (i.e. its host process) --
+    ! Init the index range for each line.
+    rece_ind_min(:,1) = 1 + shift_bis*N_proc(direction)
+    rece_ind_max(:,1) = N_proc(direction)*(shift_bis+1)
+    ! -- The second line communicates with me and the previous process --
+    rece_ind_min(:,2) = shift_bis*N_proc(direction)
+    rece_ind_max(:,2) = (1+shift_bis)*N_proc(direction)
+    ! -- The third line communicates with me and my two neighbors --
+    rece_ind_min(:,3) = shift_bis*N_proc(direction)
+    rece_ind_max(:,3) = (1+shift_bis)*N_proc(direction) + 1
+    ! -- The fourth line communicates with me and the next process --
+    rece_ind_min(:,4) = shift_bis*N_proc(direction) + 1
+    rece_ind_max(:,4) = (1+shift_bis)*N_proc(direction) + 1
+    ! -- And the fifth line depends on its first index inside the group --
+    rece_ind_min(:,5) = 1 + shift_bis*N_proc(direction)
+    rece_ind_min(3,5) = shift_bis*N_proc(direction)
+    rece_ind_max(:,5) = (1+shift_bis)*N_proc(direction) + 1
+    rece_ind_max(2,5) = (1+shift_bis)*N_proc(direction)
+
+    ! Compute the associated processes
+    rece_gap(:,:,1) = floor(real(rece_ind_min-1)/N_proc(direction))
+    rece_gap(:,:,2) = floor(real(rece_ind_max-1)/N_proc(direction))
+    rece_gap_abs(1) = minval(rece_gap(:,:,1))
+    rece_gap_abs(2) = maxval(rece_gap(:,:,2))
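+    ! A mesh index i belongs to the process at relative offset
+    ! floor((i-1)/N_proc) along the direction; the min/max of these gaps
+    ! bound the set of processes I will receive data from.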
+    allocate(rece_rank(rece_gap_abs(1):rece_gap_abs(2)))
+    max_size = 2 + gs(2)*(2+3*gs(1))
+    allocate(cartography(max_size,rece_gap_abs(1):rece_gap_abs(2)))
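+    ! max_size presumably bounds one cartography column: 2 header entries
+    ! plus, for each of the gs(2) groups of lines, 2 bookkeeping entries and
+    ! up to 3 range values per line, i.e. 2 + gs(2)*(2+3*gs(1)).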
+
+
+    ! -- And launch the test --
+    cartography = -1
+    call AC_velocity_determine_communication(direction, ind_group, gs, send_gap,  &
+    & rece_gap, rece_gap_abs, rece_rank, cartography)
+
+    ! -- Check result --
+    ! send_gap
+    send_theoric(1,:) = (/-1-shift_bis,1-shift_bis/)
+    send_theoric(2,:) = 25
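+    ! Expected result: with a shift s, data is sent to the relative ranks
+    ! -1-s .. 1-s (my own rank and my two neighbours, shifted); the second row
+    ! presumably counts the 5x5 = 25 lines of the group that are concerned.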
+    call test_check_success(send_gap, send_theoric, success, cart_rank)
+    call test_substatus('send_gap', success, cart_rank)
+    ! cartography
+    allocate(carto_th(max_size, rece_gap_abs(1):rece_gap_abs(2)))
+    carto_th = -1
+    if (rece_rank(-1+shift_bis) /= D_rank(direction)) then
+        carto_th(1:13,-1+shift_bis)= (/0,13,0,2,2,0,2,1,5,1,5,3,3/)
+    else
+        carto_th(1:7,-1+shift_bis)= (/0,7,0,0,0,0,0/)
+    end if
+    if (rece_rank(shift_bis) /= D_rank(direction)) then
+        carto_th(1:17,shift_bis)= (/0,17,2,2,2,2,2,1,5,1,5,1,5,1,5,1,5/)
+    else
+        carto_th(1:7,shift_bis)= (/0,7,0,0,0,0,0/)
+    end if
+    if (rece_rank(1+shift_bis) /= D_rank(direction)) then
+        carto_th(1:15,1+shift_bis)= (/0,15,0,0,2,2,4,1,5,1,5,1,1,3,5/)
+    else
+        carto_th(1:7,1+shift_bis)= (/0,7,0,0,0,0,0/)
+    end if
+    call test_check_success(cartography, carto_th, success, cart_rank)
+    call test_substatus('cartography', success, cart_rank)
+
+!        ! -- For the fourth: Contraction / extension --
+!        if (modulo(nb_proc_dim(direction),2)==0) then
+!            if (modulo(D_rank(direction),2)== 0) then
+!                ! Contraction
+!                j_min = 1 + shift_bis*N_proc(direction)
+!                j_max = (1+shift_bis)*N_proc(direction)
+!                send_begin = 0
+!                send_end = 0
+!            else
+!                ! Dilatation
+!                j_min = +1 - 2*bl_bound_size + shift_bis*N_proc(direction)
+!                j_max = (1+shift_bis)*N_proc(direction) + 2*bl_bound_size 
+!                send_begin = -1
+!                send_end = 1
+!            end if
+
+    deallocate(rece_rank)
+    deallocate(cartography)
+    deallocate(carto_th)
+
+    success = .not.success
+
+end function test_part_AC_velo_determine_com
+
+
+!> Particle method: validation of the velocity interpolation
+!!     @return      success     = test success (= false if the code passes the test)
+!!     @param[in]   direction   = 1 for along X, 2 for along Y, 3 for along Z
+!!     @param[in]   shift       = global translation of indices
+function test_part_AC_velo_compute_group(direction, shift) result(success)
+
+    ! External Library
+    use mpi
+    ! Scales code
+    use cart_topology
+    use advec_common
+    ! Test procedures
+    use advec_aux_init
+    use test_common
+
+    logical                                     :: success          
+    integer, intent(in)                         :: direction, shift
+
+    integer                                                 :: ind,ind1,ind2 ! indices
+    integer                                                 :: nn, clock    ! to generate random number
+    integer, dimension(:), allocatable                      :: seed         ! to generate random number
+    real(WP), dimension(N_proc(direction), 5, 5)            :: p_pos        ! location where the velocity is interpolated
+    real(WP), dimension(N_proc(direction))                  :: r            ! random numbers to initialise p_pos
+    real(WP), dimension(N_proc(direction),5,5)              :: velo         ! velocity field
+    real(WP), dimension(N_proc(direction),5,5)              :: good_velo    ! analytic interpolation of the velocity field at particle positions
+    real(WP)                                                :: dt           ! time step
+    integer                                                 :: T            ! for solver iteration
+    integer, dimension(2)                                   :: gs           ! group size
+
+    ! Initialisation
+    success = .true.
+    call test_substatus('test velocity interpolation', cart_rank)
+    call test_substatus('shift', shift, cart_rank)
+    ! To generate random number
+    call RANDOM_SEED(size = nn)
+    allocate(seed(nn))
+    call SYSTEM_CLOCK(COUNT=clock)   
+    seed = clock + 37 * (/ (ind - 1, ind = 1, nn) /)
+    call RANDOM_SEED(PUT = seed)   
+    deallocate(seed)
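+    ! Seeding from the clock makes each run draw a different random
+    ! perturbation r in [0,1) of the particle positions.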
+    ! Position where the velocity will be interpolated
+    CALL RANDOM_NUMBER(r)
+    do ind = 1, N_proc(direction)
+        p_pos(ind,:,:) = (ind+shift)
+    end do
+    gs = group_size(direction,:)
+    call test_check_success((/5,5/),gs, success, cart_rank)
+    call test_substatus('group is 5', success, cart_rank)
+    if (success) then
+
+        ! ===== Init particle positions =====
+        ! -- First column: particles are inside the current and previous processes --
+        p_pos(:,1,1) = p_pos(:,1,1) - 3 + r*d_sc(direction)
+        p_pos(:,2,1) = p_pos(:,2,1) - 3
+        p_pos(:,3,1) = p_pos(:,3,1) - 2 + r*d_sc(direction)
+        p_pos(:,4,1) = p_pos(:,4,1) - 2
+        p_pos(:,5,1) = p_pos(:,5,1) - 1 + r*d_sc(direction)
+        ! -- Second and third columns: they are on the current process and my two neighbors --
+        p_pos(:,:,2) = p_pos(:,:,1) + 1
+        p_pos(:,:,3) = p_pos(:,:,1) + 2
+        ! -- Fourth column: particles are on the current and the next processes --
+        p_pos(:,:,4) = p_pos(:,:,1) + 3
+
+
+        ! ===== First test: constant velocity =====
+        do ind2 = 1, 5
+            do ind1 = 1, 5
+                call particle_velo_init('constant', velo(:,ind1,ind2), direction, p_pos(:,ind1,ind2), good_velo(:,ind1,ind2))
+            end do
+        end do
+        dt = 0.0
+        call AC_particle_velocity(dt, direction, gs, (/1,1/), p_pos, velo)
+        ! Check result
+        call test_check_success(velo, good_velo, success, cart_rank)
+        call test_substatus('constant velocity', success, cart_rank)
+
+        ! ===== Second test: non-constant velocity =====
+        do ind2 = 1, 5
+            do ind1 = 1, 5
+                call particle_velo_init('translation_field', velo(:,ind1,ind2), direction, &
+                    & p_pos(:,ind1,ind2), good_velo(:,ind1,ind2))
+            end do
+        end do
+        call AC_particle_velocity(dt, direction, gs, (/1,1/), p_pos, velo)
+        ! Check result
+        call test_check_success(velo, good_velo, success, cart_rank)
+        call test_substatus('non-constant velocity', success, cart_rank)
+    end if
+
+    ! Return not success
+    success = .not. success
+
+end function test_part_AC_velo_compute_group
+
+
+!> Particle method: validation of particle tag and block type determination for
+!! a group of lines
+!!     @return      error   = test error (= false if the code passes the test) (= not success)
+function test_part_AC_type_and_block_O2_group() result(success)
+
+    ! Library
+    use mpi
+    ! Scales code
+    use advec
+    use advec_common    ! Some procedures common to advection along all directions
+    use advec_variables ! contains info about solver parameters and others.
+    use cart_topology
+    ! Test procedures
+    use test_common
+
+    logical                                     :: success
+
+    integer                                     :: shift_bis    ! global translation of indices
+    character(str_short)                        :: order        ! order of the particle solver
+    
+    logical, dimension(:,:,:), allocatable      :: bl_type      ! computed type of each block
+    logical, dimension(:,:,:), allocatable      :: bl_tag       ! computed tag of each block
+    logical, dimension(:,:,:), allocatable      :: good_type    ! correct (analytic) type of each block
+    logical, dimension(:,:,:), allocatable      :: good_tag     ! correct (analytic) tag of each block
+    integer                                     :: direction    ! direction (2 if along Y, 3 if along Z)
+    real(WP)                                    :: dt, cfl      ! time step and CFL number
+    real(WP), dimension(:,:,:), allocatable     :: p_V          ! particle velocity (used to tag particles)
+    integer                                     :: ind, ind2    ! index of the current particle
+    integer                                     :: ind_bl       ! index of the current block
+    real(WP)                                    :: lambda_min   ! minimum Courant number on a block
+    integer                                     :: ind_tag_bl   ! index of the first tagged block in the test
+    integer                                     :: ind_tag_p    ! index of the particle corresponding to the velocity
+                                                                ! variation which induces a tag of the block ind_tag_bl
+    integer                                     :: ind_tag_bl2  ! index of the second tagged block in the test
+    integer                                     :: ind_tag_p2   ! index of the particle which induces a tag of the block ind_tag_bl2
+
+
+    ! Initialisation of context (solver, ...)
+    success = .true.
+    shift_bis = 0    ! no index shift in this test
+    order ='p_O2'
+    call set_group_size(5)
+    call advec_init(order, verbosity=.false.)
+    direction = 3
+
+    ! Allocation
+    allocate(bl_type(bl_nb(direction)+1,group_size(direction,1),group_size(direction,2)))
+    allocate(good_type(bl_nb(direction)+1,group_size(direction,1),group_size(direction,2)))
+    allocate(bl_tag(bl_nb(direction),group_size(direction,1),group_size(direction,2)))
+    allocate(good_tag(bl_nb(direction),group_size(direction,1),group_size(direction,2)))
+    allocate(p_V(N_proc(direction),group_size(direction,1),group_size(direction,2)))
+
+    ! Initialise the different parameters and fields
+    dt = 1
+    cfl = dt/d_sc(direction)
+    p_V = 0
+
+    ! Test case = nothing on lines with an index different from (2,3).
+    ! On that line, the first part of the blocks are left ones and the second part are centered.
+    ! Due to the periodic condition, there are also 2 tags, one for each type switch
+    ! (left => center, and center => left).
+    ! Update dt in order to create a "shock" without breaking the stability condition
+    dt = 0.8*d_sc(direction)/sqrt(2*1.2*bl_size)
+    cfl = dt/d_sc(direction)
+    good_type = .false. 
+    good_tag = .false.
+    ind_tag_bl = (nb_proc_dim(direction)*bl_nb(direction)/3) - (bl_nb(direction)*coord(direction))
+    ind_tag_p = (ind_tag_bl-1)*bl_size + bl_size/2 + 1
+    ind_tag_p2 = floor((0.3/0.8)*(N(direction)-N_proc(direction)*coord(direction)-ind_tag_p) + ind_tag_p)
+    ind_tag_p2 = ind_tag_p2 + 1
+    ind_tag_bl2 = (ind_tag_p2 - 1 - bl_size/2)/bl_size + 1
+    if (modulo(ind_tag_p2-1-bl_size/2, bl_size)==0) ind_tag_bl2 = ind_tag_bl2 -1
+    if (ind_tag_bl <=bl_nb(direction)) then
+        if (ind_tag_bl>=1) then
+            ! Tag the right block transition
+            good_tag(ind_tag_bl, 2, 3) = .true.
+        else 
+            ! Change the velocity for the first half block
+            lambda_min = max(0,shift_bis) + 10
+            do ind = 1, bl_size/2
+                p_V(ind, 2, 3) = 0.8*(1.0-float(ind-ind_tag_p) &
+                    & /(N(direction)-N_proc(direction)*coord(direction)-ind_tag_p))/cfl
+            end do
+            if (ind+1<ind_tag_p2) good_type(1, 2, 3) = .true.
+        end if
+        ! Update velocity and block type
+        do ind_bl = max(2, ind_tag_bl+1), bl_nb(direction)
+            lambda_min = max(0,shift_bis) + 10
+            do ind = 1, bl_size
+                ind2 = ind+((ind_bl-2)*bl_size)+bl_size/2 ! the first block is only a half block
+                p_V(ind2, 2, 3) = 0.8*(1.-float(ind2-ind_tag_p) &
+                        &/(N(direction)-N_proc(direction)*coord(direction)-ind_tag_p))/cfl
+            end do
+            if (ind2+1<ind_tag_p2) good_type(ind_bl, 2, 3) = .true.
+        end do
+        ! For the last half block
+        lambda_min = max(0,shift_bis) + 10
+        ind_bl = bl_nb(direction) + 1
+        do ind = 1, bl_size/2
+            ind2 = ind+((ind_bl-2)*bl_size)+bl_size/2 ! the first block is only a half block
+            p_V(ind2, 2 , 3) = 0.8*(1.-float(ind2-ind_tag_p) &
+                    & /(N(direction)-N_proc(direction)*coord(direction)-ind_tag_p))/cfl
+        end do
+        if (ind2+bl_size/2+1<ind_tag_p2) good_type(ind_bl, 2, 3) = .true.
+    end if
+    if ((ind_tag_bl2 <= bl_nb(direction)).and.(ind_tag_bl2>0)) good_tag(ind_tag_bl2, 2, 3) = .true.
+    call AC_type_and_block(dt, direction, group_size(direction,:), (/1,1/), p_V, bl_type, bl_tag)
+
+
+    call test_check_success(bl_tag, good_tag, success, cart_rank)
+    call test_substatus('test 3 - two tag ', success, cart_rank)
+    call test_check_success(bl_type, good_type, success, cart_rank)
+    call test_substatus('test 3 - center at first, then left', success, cart_rank)
+
+
+    ! Deallocation
+    deallocate(bl_type)
+    deallocate(good_type)
+    deallocate(bl_tag)
+    deallocate(good_tag)
+    deallocate(p_V)
+
+    success = .not. success
+
+end function test_part_AC_type_and_block_O2_group
+
+
+! ============================================================
+! ==========         Tests about remeshing          ==========
+! ============================================================
+
+!> Particle method: validation of the computation of the remeshing range for a group of lines
+!!     @return      error   = test error (= false if the code passes the test) (= not success)
+function test_part_AC_remesh_range() result(success)
+
+    ! Library
+    use mpi
+    ! Scales code
+    use advec
+    use advec_common    ! Some procedures common to advection along all directions
+    use advec_variables ! contains info about solver parameters and others.
+    use cart_topology
+    ! Test procedures
+    use test_common
+    use advec_aux_init
+
+    logical                                 :: success
+
+    integer                                 :: shift_bis    ! global translation of indices
+    character(str_short)                    :: order        ! order of the particle solver
+    
+    logical, dimension(:,:,:), allocatable  :: bl_type      ! type of block
+    logical, dimension(:,:,:), allocatable  :: bl_tag       ! tag of block
+    integer                                 :: direction    ! direction (2 if along Y, 3 if along Z)
+    real(WP), dimension(:,:,:), allocatable :: p_pos_adim   ! particle position
+    integer, dimension(:,:), allocatable    :: send_min     ! first mesh where particles are remeshed
+    integer, dimension(:,:), allocatable    :: good_send_min! theoretical first mesh where particles are remeshed
+    integer, dimension(:,:), allocatable    :: send_max     ! last mesh where particles are remeshed
+    integer, dimension(:,:), allocatable    :: good_send_max! theoretical last mesh where particles are remeshed
+    integer, dimension(:,:,:), allocatable  :: send_gap     ! distance between me and the processes to which I send information
+    integer, dimension(2)                   :: send_gap_abs ! min (resp. max) value of send_gap(:,:,i) with i=1 (resp. 2)
+    integer, dimension(2)                   :: gs           ! group size
+    integer, dimension(2)                   :: ind_group    ! group index
+
+
+    ! Initialisation of context (solver, ...)
+    success = .true.
+    direction = 2
+    shift_bis = 0
+    ind_group = (/1,1/)
+    call set_group_size(5)
+    gs = group_size(direction,:)
+
+    ! Allocation
+    allocate(bl_type(bl_nb(direction)+1,group_size(direction,1),group_size(direction,2)))
+    allocate(bl_tag(bl_nb(direction),group_size(direction,1),group_size(direction,2)))
+    allocate(p_pos_adim(N_proc(direction),group_size(direction,1),group_size(direction,2)))
+    allocate(send_min(group_size(direction,1),group_size(direction,2)))
+    allocate(send_max(group_size(direction,1),group_size(direction,2)))
+    allocate(good_send_min(group_size(direction,1),group_size(direction,2)))
+    allocate(good_send_max(group_size(direction,1),group_size(direction,2)))
+    allocate(send_gap(group_size(direction,1),group_size(direction,2),2))
+
+    ! ----- Initialisation -----
+    bl_type = .true.
+    bl_tag = .false.
+    call pos_gp_init(direction, gs, shift_bis, p_pos_adim, bl_type)
+
+    ! ----- Lambda 2 corrected -----
+    order ='p_O2'
+    ! Init solver
+    call advec_init(order, verbosity=.false.)
+    ! Test procedure
+    call AC_remesh_range(bl_type, p_pos_adim, direction, send_min, send_max, send_gap, send_gap_abs)
+    ! Compute theoretical value
+    where (bl_type(1,:,:))
+        ! First particle is a centered one
+        good_send_min = nint(p_pos_adim(1,:,:))-1
+    elsewhere
+        ! First particle is a left one
+        good_send_min = floor(p_pos_adim(1,:,:))-1
+    end where
+    where (bl_type(N_proc(direction)/bl_size +1,:,:))
+        ! Last particle is a centered one
+        good_send_max = nint(p_pos_adim(N_proc(direction),:,:))+1
+    elsewhere
+        ! Last particle is a left one
+        good_send_max = floor(p_pos_adim(N_proc(direction),:,:))+1
+    end where
+    ! Check results
+    call test_check_success(send_min, good_send_min, success, cart_rank)
+    call test_check_success(send_max, good_send_max, success, cart_rank)
+    call test_substatus('remesh range - order 2 ', success, cart_rank)
+
+    ! ----- Lambda 4 corrected -----
+    order ='p_O4'
+    ! Init solver
+    call advec_init(order, verbosity=.false.)
+    ! Test procedure
+    call AC_remesh_range(bl_type, p_pos_adim, direction, send_min, send_max, send_gap, send_gap_abs)
+    ! Compute theoretical value
+    where (bl_type(1,:,:))
+        ! First particle is a centered one
+        good_send_min = nint(p_pos_adim(1,:,:))-2
+    elsewhere
+        ! First particle is a left one
+        good_send_min = floor(p_pos_adim(1,:,:))-2
+    end where
+    where (bl_type(N_proc(direction)/bl_size +1,:,:))
+        ! Last particle is a centered one
+        good_send_max = nint(p_pos_adim(N_proc(direction),:,:))+2
+    elsewhere
+        ! Last particle is a left one
+        good_send_max = floor(p_pos_adim(N_proc(direction),:,:))+2
+    end where
+    ! Check results
+    call test_check_success(send_min, good_send_min, success, cart_rank)
+    call test_check_success(send_max, good_send_max, success, cart_rank)
+    call test_substatus('remesh range - order 4 ', success, cart_rank)
+    
+    ! ----- M'6 -----
+    order = 'p_M6'
+    ! Init solver
+    call advec_init(order, verbosity=.false.)
+    ! Test procedure
+    call AC_remesh_range(bl_type, p_pos_adim, direction, send_min, send_max, send_gap, send_gap_abs)
+    ! Compute theoretical value
+    good_send_min = nint(p_pos_adim(1,:,:))-2
+    good_send_max = floor(p_pos_adim(N_proc(direction),:,:))+3
+    ! Check results
+    call test_check_success(send_min, good_send_min, success, cart_rank)
+    call test_check_success(send_max, good_send_max, success, cart_rank)
+    call test_substatus('remesh range - Mprime 6 ', success, cart_rank)
+!    call AC_remesh_determine_communication(direction, gs, ind_group, rece_gap, send_gap, send_gap_abs, send_rank, cartography)
+
+
+    ! Free memory
+    deallocate(bl_type)
+    deallocate(bl_tag)
+    deallocate(p_pos_adim)
+    deallocate(send_min)
+    deallocate(send_max)
+    deallocate(good_send_min)
+    deallocate(good_send_max)
+    deallocate(send_gap)
+
+    success = .not. success
+
+end function test_part_AC_remesh_range
+
+
+!> Particle method: validation of the determination of the communications needed to remesh a group of lines
+!!     @param[in]   shift   = global translation of indices (optional)
+!!     @return      error   = test error (= false if the code passes the test) (= not success)
+function test_part_AC_remesh_determine_communication(shift) result(success)
+
+    ! Library
+    use mpi
+    ! Scales code
+    use advec
+    use advec_common    ! Some procedures common to advection along all directions
+    use advec_variables ! contains info about solver parameters and others.
+    use cart_topology
+    ! Test procedures
+    use test_common
+    use advec_aux_init
+
+    integer, intent(in), optional           :: shift
+    logical                                 :: success
+
+    integer                                 :: shift_bis    ! global translation of indices
+    character(str_short)                    :: order        ! order of the particles solver
+    
+    logical, dimension(:,:,:), allocatable  :: bl_type      ! type of block
+    logical, dimension(:,:,:), allocatable  :: bl_tag       ! tag of block
+    integer                                 :: direction    ! direction (2 if along Y, 3=along Z)
+    real(WP), dimension(:,:,:), allocatable :: p_pos_adim   ! particle position
+    integer, dimension(:,:), allocatable    :: send_min     ! first mesh where particles are remeshed
+    integer, dimension(:,:), allocatable    :: send_max     ! last mesh where particles are remeshed
+    integer, dimension(:,:,:), allocatable  :: send_gap     ! distance between me and the processes to which I send information
+    integer, dimension(2)                   :: send_gap_abs ! min (resp. max) value of send_gap(:,:,i) with i=1 (resp. 2)
+    integer, dimension(2 , 2)               :: rece_gap     ! distance between me and the processes from which I receive information
+    integer, dimension(2)                   :: gs           ! group size
+    integer, dimension(2)                   :: ind_group    ! group index
+    integer                                 :: max_size     ! maximal size of cartography(:,proc_gap)
+    integer, dimension(:,:), allocatable    :: cartography  ! cartography(:,proc_gap) contains the set of line indices in the block to which the
+                                                            ! current process will send data during remeshing and, for each of these lines, the
+                                                            ! range of mesh points from which it requires the velocity values.
+    integer, dimension(:), allocatable      :: send_rank    ! ranks of the processes to which I send information
+
+
+    ! Initialisation of context (solver, ...)
+    success = .true.
+    direction = 2
+    ind_group = (/1,1/)
+    call set_group_size(5)
+    gs = group_size(direction,:)
+    if (present(shift)) then
+        shift_bis = shift
+    else
+        shift_bis = 0
+    end if
+
+
+    ! Allocation
+    allocate(bl_type(bl_nb(direction)+1,group_size(direction,1),group_size(direction,2)))
+    allocate(bl_tag(bl_nb(direction),group_size(direction,1),group_size(direction,2)))
+    allocate(p_pos_adim(N_proc(direction),group_size(direction,1),group_size(direction,2)))
+    allocate(send_min(group_size(direction,1),group_size(direction,2)))
+    allocate(send_max(group_size(direction,1),group_size(direction,2)))
+    allocate(send_gap(group_size(direction,1),group_size(direction,2),2))
+
+    ! ----- Initialisation -----
+    bl_type = .true.
+    bl_tag = .false.
+    call pos_gp_init(direction, gs, shift_bis, p_pos_adim, bl_type)
+
+    ! ===== Test method =====
+    ! -- Init solver --
+    order ='p_O2'
+    call advec_init(order, verbosity=.false.)
+    ! -- Compute range --
+    call AC_remesh_range(bl_type, p_pos_adim, direction, send_min, send_max, send_gap, send_gap_abs)
+    ! -- Allocation --
+    max_size = 2 + gs(2)*(2+3*gs(1))
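+    ! (upper bound read off the formula: 2 header entries plus, for each of
+    ! the gs(2) second-index values, 2 entries and at most 3 values per line)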
+    allocate(cartography(max_size,send_gap_abs(1):send_gap_abs(2)))
+    allocate(send_rank(send_gap_abs(1):send_gap_abs(2)))
+    ! -- Test the target procedure: determine which processes communicate together --
+    call AC_remesh_determine_communication(direction, gs, ind_group, rece_gap, send_gap, send_gap_abs, send_rank, cartography)
+
+    ! -- Compute theoretical results --
+
+    ! -- Check results --
+    !call test_check_success(send_min, good_send_min, success, cart_rank)
+    !call test_check_success(send_max, good_send_max, success, cart_rank)
+    !call test_substatus('remesh range - order 2 ', success, cart_rank)
+
+
+    ! -- Free memory --
+    deallocate(bl_type)
+    deallocate(bl_tag)
+    deallocate(p_pos_adim)
+    deallocate(send_min)
+    deallocate(send_max)
+    deallocate(send_gap)
+    deallocate(cartography)
+    deallocate(send_rank)
+
+    success = .not. success
+
+end function test_part_AC_remesh_determine_communication
+
+end module advec_aux_common
+!> @}
diff --git a/HySoP/src/Unstable/LEGI/test/src/Test_advec/advec_aux_init.f90 b/HySoP/src/Unstable/LEGI/test/src/Test_advec/advec_aux_init.f90
new file mode 100644
index 0000000000000000000000000000000000000000..9747a839c4dbcdcada15ce65aef9b87ca059e406
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/test/src/Test_advec/advec_aux_init.f90
@@ -0,0 +1,529 @@
+!> @addtogroup part_test
+!! @{
+    
+!------------------------------------------------------------------------------
+!
+! MODULE: advec_aux_init
+!
+! DESCRIPTION: 
+!> Initialisation procedures for the advection tests (and for the solver based on the particle method).
+!!
+!! @details
+!! This module provides different initialisation setups in order to test the transport solver.
+!!
+!! The following initialisations are included:
+!!      1 -> Constant field for both scalar and velocity
+!!      2 -> 2D-rotation of a sphere
+!!      3 -> scalar(i,j,k) = i/Nx + 10* j/Ny + 100*k/Nz with periodic boundary condition
+!!      4 -> velocity(i,j,k) = i/Nx + 10* j/Ny + 100*k/Nz with periodic boundary condition
+!!
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advec_aux_init
+
+    use string
+    use precision
+    implicit none
+
+    ! ===== Initialisation for advection test =====
+    ! Public function
+    public              :: velo_init
+    public              :: particle_velo_init
+    public              :: scal_init
+    public              :: scal_velo_init_part
+    ! Private function
+    private             :: compute_velo_tag
+
+    ! ===== Setup parameter =====
+    ! Public variables
+    !> Period for rotation cases
+    real(WP), public    :: period = 1
+    ! Private variable
+    !> Pi value
+    real(WP), parameter   :: M_PI = ACOS(-1.0)
+
+
+
+
+contains
+
+!> Initialisation of the scalar field for the different tests.
+!!    @param[in]    init        = parameter to initialise the scalar field to a constant one or to a sphere shape
+!!    @param[in]    translat    = optional argument, translates the field in order to obtain the analytic solution for a
+!!                                  non-zero velocity in the "translation field" test case
+!!    @param[out]   scalar      = scalar field
+!!    @param[out]   good_scal   = analytic solution
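+!!
+!! A minimal usage sketch (assuming the cart_topology context is already
+!! initialised, so that WP and N_proc are available):
+!! @code
+!! real(WP), dimension(N_proc(1),N_proc(2),N_proc(3)) :: scal, good
+!! call scal_init('2D_rot', scal, good)
+!! @endcode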
+subroutine scal_init(init, scalar, good_scal, translat) 
+
+    use test_common
+    use cart_topology
+
+    character(len=*), intent(in)                    :: init
+    real(WP), dimension(:,:,:), intent(out)         :: scalar
+    real(WP), dimension(:,:,:), intent(out)         :: good_scal
+    real(WP), dimension(3), intent(in), optional    :: translat
+
+    real(WP)                                        :: rr, rx, ry, rz, rayon
+    real(WP)                                        :: dist
+    integer                                         :: i, j, k
+
+
+    select case(init)
+        case('constant')
+            call test_substatus('constant scal', cart_rank)
+            scalar = 1. 
+            good_scal = 1.
+        case('2D_rot')
+            call test_substatus('uncentred sphere', cart_rank)
+            scalar = 0.
+            rayon = (minval(N*d_sc)/10.0)**2
+            do k = 1, N_proc(3)
+                rz = (d_sc(3)*(k + coord(3)*N_proc(3) - 3.0*N(3)/5.0))**2
+                do j = 1, N_proc(2)
+                    ry = (d_sc(2)*(j + coord(2)*N_proc(2)- 3.0*N(2)/5.0))**2
+                    do i = 1, N_proc(1)
+                        rx = (d_sc(1)*(i - 3.0*N(1)/5.0))**2
+                        rr = rx + ry + rz
+                        if (rr < rayon) scalar(i,j,k) = (1 - rr/rayon)**4
+                    end do
+                end do
+            end do
+            good_scal = scalar
+        case('translation_field')
+            call test_substatus('translation field', cart_rank)
+            if (present(translat)) then
+                call test_substatus('translation on X', translat(1), cart_rank)
+                call test_substatus('translation on Y', translat(2), cart_rank)
+                call test_substatus('translation on Z', translat(3), cart_rank)
+            else
+                call test_substatus('velocity = zero ', cart_rank)
+            end if
+            do k = 1, N_proc(3)
+                do j = 1, N_proc(2)
+                    do i = 1, N_proc(1)
+                        dist = (float(i-1+coord(1)*N_proc(1))/N(1)) 
+                        scalar(i,j,k) = cos((dist-0.5)*2*M_PI)
+                        dist = (float(j-1+coord(2)*N_proc(2))/N(2))
+                        scalar(i,j,k) = scalar(i,j,k)*cos((dist-0.5)*2*M_PI)
+                        dist = (float(k-1+coord(3)*N_proc(3))/N(3)) 
+                        scalar(i,j,k) = scalar(i,j,k)*cos((dist-0.5)*2*M_PI)
+                        if (present(translat)) then
+                            ! Only periodic condition
+                            dist = (i+translat(1)-1+coord(1)*N_proc(1))*d_sc(1) ! distance to the boundary x = 0
+                            dist = modulo(dist, length(1)) ! use the periodicity
+                            good_scal(i,j,k) = cos((dist-0.5)*2*M_PI)
+                            dist = (j+translat(2)-1+coord(2)*N_proc(2))*d_sc(2)
+                            dist = modulo(dist, length(2))
+                            good_scal(i,j,k) = good_scal(i,j,k) * cos((dist-0.5)*2*M_PI)
+                            dist = (k+translat(3)-1+coord(3)*N_proc(3))*d_sc(3)
+                            dist = modulo(dist, length(3))
+                            good_scal(i,j,k) = good_scal(i,j,k) * cos((dist-0.5)*2*M_PI)
+                        else
+                            good_scal(i,j,k) = scalar(i,j,k)
+                        end if
+                    end do
+                end do
+            end do
+        case default
+            scalar = 1.
+            good_scal = 1.
+    end select
+
+end subroutine scal_init
+
+
+
+!> Initialisation of the velocity field to test its interpolation
+!!    @param[in]        init        = parameter to initialise the velocity field to a constant one or to a rotation field
+!!    @param[out]       velo        = velocity field along one direction
+!!    @param[in]        direction   = current direction (along X,Y or Z-axis)
+!!    @param[in]        p_pos_adim  = adimensioned particle positions (locations where the velocity will be interpolated)
+!!    @param[in,out]    good_velo   = analytic interpolation of the velocity field (at locations p_pos)
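+!!
+!! A minimal usage sketch (assuming the topology and the mesh are already
+!! initialised; here the X component for the 2D rotation case):
+!! @code
+!! call velo_init('2D_rot', velo, 1, p_pos_adim, good_velo)
+!! @endcode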
+subroutine velo_init(init, velo, direction, p_pos_adim, good_velo)
+
+    use test_common
+    use cart_topology
+
+    character(len=*), intent(in)                :: init
+    real(WP), dimension(:,:,:), intent(inout)   :: velo
+    real(WP), dimension(:), intent(in)          :: p_pos_adim
+    integer, intent(in)                         :: direction
+    real(WP), dimension(:,:,:), intent(inout)   :: good_velo
+
+    real(WP)                                    :: dist         ! distance from original position for translation case
+    integer                                     :: i, j, k      ! mesh indices
+    integer                                     :: ierr         ! mpi error code
+
+
+    select case(init)
+        case('constant')
+            velo = 1. 
+            good_velo = 1.
+        case('2D_rot')
+            select case(direction)
+                case(1)
+                    do k = 1, N_proc(3)
+                        do j = 1, N_proc(2)
+                            do i = 1, N_proc(1)
+                                velo(i,j,k)=(2*M_PI/period)*(length(2)/2.0-((j+coord(2)*N_proc(2))*d_sc(2)))
+                                good_velo(i,j,k)=(2*M_PI/period)*(length(2)/2.0-((j+coord(2)*N_proc(2))*d_sc(2)))
+                            end do
+                        end do
+                    end do
+                case(2)
+                    do k = 1, N_proc(3)
+                        do j = 1, N_proc(2)
+                            do i = 1, N_proc(1)
+                                velo(i,j,k)=(2*M_PI/period)*(((i+coord(1)*N_proc(1))*d_sc(1))-length(1)/2.0)
+                                good_velo(i,j,k)=(2*M_PI/period)*(((i+coord(1)*N_proc(1))*d_sc(1))-length(1)/2.0)
+                            end do
+                        end do
+                    end do
+                case(3)
+                    do k = 1, N_proc(3)
+                        do j = 1, N_proc(2)
+                            do i = 1, N_proc(1)
+                                velo(i,j,k)=0
+                                good_velo(i,j,k)=0
+                            end do
+                        end do
+                    end do
+                case default
+                    call test_substatus(' XXX error : wrong direction =', direction,  cart_rank)
+                    stop
+            end select
+        case('translation_field')
+            select case(direction)
+                case(1)
+                    do k = 1, N_proc(3)
+                        do j = 1, N_proc(2)
+                            do i = 1, N_proc(1)
+                                velo(i,j,k) = (float(i-1)/N(1)) &
+                                    & + 10*(float(j-1+coord(2)*N_proc(2))/N(2)) &
+                                    & + 100*(float(k-1+coord(3)*N_proc(3))/N(3)) 
+                                if (periods(1) .eqv. .true.) then
+                                    dist = (p_pos_adim(i)-1+coord(1)*N_proc(1))*d_sc(1) ! distance to the boundary x = 0
+                                    dist = modulo(dist, length(1)) ! use the periodicity
+                                    ! If dist belongs to (length - dx, length) then we have to interpolate
+                                    ! the velocity between positions (length-dx) and 0 (due to the periodic boundary condition)
+                                    if (dist>length(1)-d_sc(1)) dist = (length(1)-dist)*(length(1)-d_sc(1))/d_sc(1)
+                                    ! Otherwise, as the velocity is a linear function of the position, the analytic
+                                    ! velocity is the same as the interpolated one
+                                    good_velo(i,j,k) = dist/length(1) &
+                                        & + 10*(float(j-1+coord(2)*N_proc(2))/N(2)) &
+                                        & + 100*(float(k-1+coord(3)*N_proc(3))/N(3)) 
+                                else
+                                    call test_substatus(' boundary along X condition not implemented ', cart_rank)
+                                end if
+                            end do
+                        end do
+                    end do
+                case(2)
+                    do k = 1, N_proc(3)
+                        do j = 1, N_proc(2)
+                            do i = 1, N_proc(1)
+                                velo(i,j,k) = (float(i-1)/N(1)) &
+                                    & + 10*(float(j-1+coord(2)*N_proc(2))/N(2)) &
+                                    & + 100*(float(k-1+coord(3)*N_proc(3))/N(3)) 
+                                if (periods(2) .eqv. .true.) then
+                                    dist = (p_pos_adim(j)-1+coord(2)*N_proc(2))*d_sc(2)
+                                    dist = modulo(dist, length(2))
+                                    ! See direction 1 for explanation
+                                    if (dist>length(2)-d_sc(2)) dist = (length(2)-dist)*(length(2)-d_sc(2))/d_sc(2)
+                                    good_velo(i,j,k) = (float(i-1)/N(1)) &
+                                        & + 10*(dist)/length(2) &
+                                        & + 100*(float(k-1+coord(3)*N_proc(3))/N(3))
+                                else
+                                    call test_substatus(' boundary along Y condition not implemented ', cart_rank)
+                                end if
+                            end do
+                        end do
+                    end do
+                case(3)
+                    do k = 1, N_proc(3)
+                        do j = 1, N_proc(2)
+                            do i = 1, N_proc(1)
+                                velo(i,j,k) = (float(i-1)/N(1)) &
+                                    & + 10*(float(j-1+coord(2)*N_proc(2))/N(2)) &
+                                    & + 100*(float(k-1+coord(3)*N_proc(3))/N(3)) 
+                                if (periods(3) .eqv. .true.) then
+                                    dist = (p_pos_adim(k)-1+coord(3)*N_proc(3))*d_sc(3)
+                                    dist = modulo(dist, length(3))
+                                    ! See direction 1 for explanation
+                                    if (dist>length(3)-d_sc(3)) dist = (length(3)-dist)*(length(3)-d_sc(3))/d_sc(3)
+                                    good_velo(i,j,k) = (float(i-1)/N(1)) &
+                                        & + 10*(float(j-1+coord(2)*N_proc(2))/N(2)) &
+                                        & + 100*dist/length(3)
+                                else
+                                    call test_substatus(' boundary along Z condition not implemented ', cart_rank)
+                                end if
+                            end do
+                        end do
+                    end do
+                case default
+                    call test_substatus(' XXX error : wrong direction =', direction,  cart_rank)
+                    stop
+                end select
+        case default
+            velo = 1.
+            good_velo = 1.
+    end select
+
+
+end subroutine velo_init
+
+
+!> Initialisation of particle velocity field to test its interpolation
+!!    @param[in]        init        = parameter to initialise the velocity to a constant or to a translation field
+!!    @param[out]       velo        = initial particle velocities
+!!    @param[in]        direction   = current direction (along X,Y or Z-axis)
+!!    @param[in]        p_pos_adim  = adimensioned particle positions (locations where the velocity will be interpolated)
+!!    @param[in,out]    good_velo   = analytic interpolation of the velocity field (at locations p_pos)
+subroutine particle_velo_init(init, velo, direction, p_pos_adim, good_velo)
+
+    use test_common
+    use cart_topology
+
+    character(len=*), intent(in)                :: init
+    real(WP), dimension(:), intent(inout)       :: velo
+    real(WP), dimension(:), intent(in)          :: p_pos_adim
+    integer, intent(in)                         :: direction
+    real(WP), dimension(:), intent(inout)       :: good_velo
+
+    real(WP)                                    :: dist         ! distance from original position for translation case
+    integer                                     :: ind          ! particle index
+
+
+    select case(init)
+        case('constant')
+            velo = 1. 
+            good_velo = 1.
+        case('translation_field')
+            do ind = 1, size(velo)
+                velo(ind) = (float(ind-1+coord(direction)*N_proc(direction))/N(direction))
+                if (periods(direction) .eqv. .true.) then
+                    dist = (p_pos_adim(ind)-1+coord(direction)*N_proc(direction))*d_sc(direction)
+                    dist = modulo(dist, length(direction))
+                    ! See direction 1 for explanation
+                    if (dist>length(direction)-d_sc(direction)) &
+                        & dist = (length(direction)-dist)*(length(direction)-d_sc(direction))/d_sc(direction)
+                    good_velo(ind) = dist/length(direction)
+                else
+                    call test_substatus(' boundary condition not implemented ', cart_rank)
+                end if
+            end do
+        case default
+            velo = 1.
+            good_velo = 1.
+    end select
+
+end subroutine particle_velo_init
+
+
+!> Initialisation of particles and velocity field to provide setups to test advection
+!!    @param[in]        init        = parameter used to choose between the different setups
+!!    @param[in,out]    dt          = time step (used to compute the solution)
+!!    @param[in]        shift       = global translation of indices
+!!    @param[out]       scal3D      = scalar field
+!!    @param[out]       velo        = velocity field along one direction
+!!    @param[in]        direction   = current direction (along X,Y or Z-axis)
+!!    @param[out]       good_scal   = analytic solution of the advection problem
+subroutine scal_velo_init_part(init, dt, shift, scal3D, velo, direction, good_scal)
+
+    ! Scales code
+    use cart_topology
+    use advec_common    ! Some procedures common to advection along all directions
+    use advec_variables ! contains info about solver parameters and others.
+    ! Test procedures
+    use test_common
+
+    character(len=*), intent(in)                                    :: init
+    real(WP), intent(inout)                                         :: dt
+    integer, intent(in)                                             :: shift
+    real(WP), dimension(N_proc(1),N_proc(2),N_proc(3)), intent(out) :: velo
+    real(WP), dimension(N_proc(1),N_proc(2),N_proc(3)), intent(out) :: scal3D
+    real(WP), dimension(N_proc(1),N_proc(2),N_proc(3)), intent(out) :: good_scal
+    integer, intent(in)                                             :: direction
+
+    integer                                     :: i, j, k      ! mesh indices
+    real(WP)                                    :: cfl          ! ratio between time and space steps
+    real(WP)                                    :: t            ! some time indication
+    real(WP)                                    :: dt_bis       ! time step used to compute trajectories
+                                                                ! with a numerical integration
+    real(WP)                                    :: sX, sY, sZ   ! some temp variable
+    real(WP), dimension(3)                      :: vect_dir     ! some temp variable
+    
+
+    cfl = dt/d_sc(direction)
+
+
+    ! -- Initialize velocity and compute some trajectories --
+    select case(init)
+        case('left')
+            !  -- Test case 1 : only left block, no tag --
+            velo = (shift+0.3)/cfl
+            scal3D = -dt*velo
+            call test_substatus('particle- no tag, only left block', cart_rank)
+
+        case('tag')
+            ! Update dt in order to create a "shock" without breaking the stability condition
+            dt = 0.8*d_sc(direction)/sqrt(2*1.2*bl_size)
+            cfl = dt/d_sc(direction)
+            ! Compute trajectories
+            scal3D = 0.0
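+            ! The trajectory of each particle is integrated with an explicit
+            ! Euler scheme and a sub-time step dt_bis << dt: scal3D accumulates
+            ! the (negative) adimensioned displacement along the current
+            ! direction for the particle starting at mesh point j.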
+            do k = 1, N_proc(3)
+                do j = 1, N_proc(2)
+                    do i = 1, N_proc(1)
+                        velo(i,j,k) = compute_velo_tag(dble(j), shift, cfl, direction)
+                        dt_bis = min(0.01_WP, dt/10)
+                        t = 0
+                        do while(t <= dt-dt_bis)
+                            scal3D(i,j,k) = scal3D(i,j,k) &
+                                &- dt_bis*compute_velo_tag(j+scal3D(i,j,k), shift, cfl, direction)/d_sc(direction)
+                            t = t + dt_bis
+                        end do
+                        dt_bis = dt - t
+                        scal3D(i,j,k) = scal3D(i,j,k) &
+                             &- dt_bis*compute_velo_tag(j+scal3D(i,j,k), shift, cfl, direction)/d_sc(direction)
+                    end do
+                end do
+            end do
+            call test_substatus('particle- 2 tag, left and center', cart_rank)
+
+        case default ! default = 'center'
+            ! Test case 2 : only center block, no tag
+            velo = (shift+0.8)/cfl
+            scal3D = -dt*velo
+            call test_substatus('particle- no tag, only center block', cart_rank)
+    end select
+
+    ! -- Compute solution of the advection problem
+    vect_dir = 0
+    vect_dir(direction) = 1
+    do k = 1, N_proc(3)
+        sZ = (k-1+(coord(3)*N_proc(3)))*d_sc(3) 
+        do j = 1, N_proc(2)
+            sY = (j-1+(coord(2)*N_proc(2)))*d_sc(2)
+            do i = 1, N_proc(1)
+                sX = (i-1+(coord(1)*N_proc(1)))*d_sc(1)
+                good_scal(i,j,k) = cos(2*M_PI*(sZ+vect_dir(3)*scal3D(i,j,k))/length(3))
+                good_scal(i,j,k) = good_scal(i,j,k)*cos(2*M_PI*(sY+vect_dir(2)*scal3D(i,j,k))/length(2))
+                !good_scal(i,j,k) = good_scal(i,j,k)*cos(3*2*M_PI*(sY+vect_dir(2)*scal3D(i,j,k))/length(2))
+                good_scal(i,j,k) = good_scal(i,j,k)*cos(2*M_PI*(sX+vect_dir(1)*scal3D(i,j,k))/length(1))
+                !good_scal(i,j,k) = good_scal(i,j,k)*cos(5*2*M_PI*(sX+vect_dir(1)*scal3D(i,j,k))/length(1))
+                scal3D(i,j,k) = cos(2*M_PI*sZ/length(3))
+                scal3D(i,j,k) = scal3D(i,j,k)*cos(2*M_PI*sY/length(2))
+                !scal3D(i,j,k) = scal3D(i,j,k)*cos(3*2*M_PI*sY/length(2))
+                scal3D(i,j,k) = scal3D(i,j,k)*cos(2*M_PI*sX/length(1))
+                !scal3D(i,j,k) = scal3D(i,j,k)*cos(5*2*M_PI*sX/length(1))
+            end do
+        end do
+    end do
+
+end subroutine scal_velo_init_part
+
+!> Compute a velocity field which produces 2 tagged blocks during an advection step
+!! of the solver based on the particle method
+!!    @param[in]    pos         = relative position along current direction
+!!    @param[in]    shift       = shift  (in number of mesh)
+!!    @param[in]    cfl         = time step/space step
+!!    @param[in]    direction   = current direction
+!!    @return       res         = velocity field
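+!!
+!! @details
+!! The returned field is a constant (shift/cfl) plus, from the tag point
+!! ind_tag_p onward, a ramp decreasing from 0.8/cfl to 0: the velocity
+!! difference between neighbouring particles compresses them, which is what
+!! triggers the tagging of the corresponding blocks.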
+function compute_velo_tag(pos, shift, cfl, direction) result(res)
+
+    ! Topology
+    use cart_topology
+    ! Solver information
+    use advec_common    ! Some procedures common to advection along all directions
+    use advec_variables ! contains info about solver parameters and others.
+    ! Test tools
+    use test_common
+
+    real(WP), intent(in)    :: pos, cfl
+    integer, intent(in)     :: shift, direction
+    real(WP)                :: res
+    real(WP)                :: pos_abs      ! absolute position (i,j,k are relative positions in the current process)
+    integer                 :: ind_tag_bl, ind_tag_p
+
+    ind_tag_bl = (nb_proc_dim(direction)*(N(direction)/bl_size)/3)! - (bl_number(direction)*coord(direction))
+    ind_tag_p = (ind_tag_bl-1)*bl_size + bl_size/2 + 1
+
+    pos_abs = pos + coord(direction)*N_proc(direction)
+    
+    res = shift/cfl
+    if ((pos_abs >= ind_tag_p).and.(pos_abs < N(direction))) then
+        res = res + 0.8*(1.0-(pos_abs-ind_tag_p)/(N(direction)-ind_tag_p))/cfl
+    end if
+
+end function compute_velo_tag
+
+
+!> Initialisation of particle position for a group
+!!    @param[in]    direction   = current direction
+!!    @param[in]    gs          = group_size
+!!    @param[in]    shift       = global shift (in number of local subdomains)
+!!    @param[out]   p_pos       = particle positions
+!!    @param[out]   bl_type     = block type (left/center), optional
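+!!
+!! @details
+!! The lines of the group are initialised by cycles of five along the second
+!! group index: particles are shifted by -2.1, -1.1, 0., +1.1 or +2.1 mesh
+!! steps (plus shift*N_proc(direction)), and bl_type is set to centered
+!! (.true.) for the first two cases and to left (.false.) for the others.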
+subroutine pos_gp_init(direction, gs, shift, p_pos, bl_type)
+
+    ! Topology
+    use cart_topology
+    ! Solver information
+    use advec_common    ! Some procedures common to advection along all directions
+    use advec_variables ! contains info about solver parameters and others.
+    ! Test tools
+    use test_common
+
+    integer, intent(in)                                                             :: shift, direction
+    integer, dimension(2), intent(in)                                               :: gs
+    real(WP), dimension(N_proc(direction), gs(1), gs(2)), intent(out)               :: p_pos
+    logical, dimension(bl_nb(direction)+1, gs(1), gs(2)), intent(inout), optional   :: bl_type
+
+    integer     :: ind, ind2 ! some indices (ind = particle index, ind2 = line coordinate)
+
+    ! ===== Each line of the group is initialized considering its second index =====
+    ind2 = 1
+    do while(ind2<=gs(2))
+        ! Initialisation is done with a cycle of five
+        select case(modulo(ind2,5))
+        case(1)
+            do ind = 1, N_proc(direction)
+                p_pos(ind,:,ind2) = ind - 2.1 + shift*N_proc(direction)
+            end do
+            if(present(bl_type)) bl_type(:,:,ind2) = .true.
+        case(2)
+            do ind = 1, N_proc(direction)
+                p_pos(ind,:,ind2) = ind - 1.1 + shift*N_proc(direction)
+            end do
+            if(present(bl_type)) bl_type(:,:,ind2) = .true.
+        case(3)
+            do ind = 1, N_proc(direction)
+                p_pos(ind,:,ind2) = ind + shift*N_proc(direction)
+            end do
+            if(present(bl_type)) bl_type(:,:,ind2) = .false.
+        case(4)
+            do ind = 1, N_proc(direction)
+                p_pos(ind,:,ind2) = ind + 1.1 + shift*N_proc(direction)
+            end do
+            if(present(bl_type)) bl_type(:,:,ind2) = .false.
+        case default
+            do ind = 1, N_proc(direction)
+                p_pos(ind,:,ind2) = ind + 2.1 + shift*N_proc(direction)
+            end do
+            if(present(bl_type)) bl_type(:,:,ind2) = .false.
+        end select
+
+        ind2 = ind2 +1
+    end do
+
+end subroutine pos_gp_init
+
+
+end module advec_aux_init
+!> @}
diff --git a/HySoP/src/Unstable/LEGI/test/src/Test_advec/advec_main.f90 b/HySoP/src/Unstable/LEGI/test/src/Test_advec/advec_main.f90
new file mode 100644
index 0000000000000000000000000000000000000000..795eae187da98355231f9724504a32ff2b2af9f0
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/test/src/Test_advec/advec_main.f90
@@ -0,0 +1,213 @@
+!> @addtogroup part_test
+!! @{
+    
+!------------------------------------------------------------------------------
+!
+! PROGRAM : advec_main
+!
+! DESCRIPTION: 
+!> This program uses the functions implemented in the modules advec_aux* to
+!! test the advection solver.
+!!
+!! @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!> @details
+!! All these tests are unit tests: they return a logical value to check if
+!! the code version passes them or not.
+!!
+!! That is, all these tests are logical functions: they return true if the
+!! result is the right one and false otherwise.
+!! All the "test_part_*" functions are devoted to the validation of the
+!! particle solver.
+!!
+!! The following tests are included:
+!!    - Validate the particle method, step by step
+!!        - Test the procedure "AC_obtain_senders" from advec_common
+!!        - Validate the redistribution of the buffer during the remeshing
+!!        - Validate the redistribution of the buffer during the remeshing -
+!!            debug version: only one process contains a non-zero field.
+!!        - Validate the remeshing of untagged particles
+!!        - Validate the velocity interpolation (the RK2 scheme uses the
+!!            velocity at the middle point to advect particles).
+!!        - Validate how type (left or center) and tag are computed for a
+!!            single line
+!!        - Validate how type (left or center) and tag are computed for a
+!!            group of lines
+!!    - Validate an advection solver (in advec_aux)
+!!        - advect a ball with a constant velocity
+!!        - advect a ball with a rotating velocity field (the ball turns)
+!!        - Advection with radial shear. This test also validates the remeshing
+!!              of tagged particles.
+!!
+!
+!------------------------------------------------------------------------------
+
+program advec_main
+
+    ! External Library
+    use mpi
+    ! Scales code
+    use cart_topology
+    ! Test procedures
+    use test_common
+    use advec_aux
+    use advec_aux_common
+    use advec_aux_common_line
+
+    implicit none
+
+    logical     :: error = .false.  ! logical error
+    integer     :: ierr             ! mpi error code
+    integer     :: rank_world       ! process rank on "MPI_COMM_WORLD"
+    integer     :: nb_proc,nb_procZ ! number of processes
+    integer     :: i                ! loop index
+    integer     :: direction        ! along X (1), Y (2) or Z (3)
+   
+    ! ===== Initialisation =====
+
+    ! Set the verbosity
+    verbose_test = .true.
+    verbose_more = .false.
+    ! Initialise mpi
+    call mpi_init(ierr)    
+    call mpi_comm_rank(MPI_COMM_WORLD, rank_world, ierr)
+    call mpi_comm_size(MPI_COMM_WORLD, nb_proc, ierr)
+
+    ! Cut the domain along Y and initialize the topology
+    nb_procZ = 1
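+    ! Try to also cut the domain along Z (into 5, else 2, processes) while the
+    ! number of processes along Y still divides the default mesh size.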
+    if ((mod(nb_proc,5)==0).and.(mod(default_size, nb_proc/5)==0)) then
+        nb_procZ = 5
+        nb_proc = nb_proc/5
+    else if ((mod(nb_proc,2)==0).and.(mod(default_size, nb_proc/2)==0)) then
+        nb_procZ = 2
+        nb_proc = nb_proc/2
+    else
+        if (mod(default_size, nb_proc)/=0) stop 'wrong number of processes: it has to divide the default mesh size'
+    end if
+    call cart_create((/ nb_proc, nb_procZ /), ierr)
+    call discretisation_default()
+    call mpi_barrier(MPI_COMM_WORLD, ierr)
+
+    ! ===== Test about procedures involved in remeshing process =====
+    call mpi_barrier(MPI_COMM_WORLD, ierr)
+    call test_title('particle method - remeshing', rank_world)
+    
+    ! Does it correctly compute who communicates with whom during remeshing?
+    error = test_part_AC_obtain_senders()
+    call test_status(error, 'obtain_senders without shift', rank_world)
+    error = test_part_AC_obtain_senders(1)
+    call test_status(error, 'obtain_senders with shift', rank_world)
+    error = test_part_AC_obtain_senders(3*nb_proc_dim(2)+2)
+    call test_status(error, 'obtain_senders with large shift', rank_world)
+    
+    ! The remeshing is done in a buffer. Is it correctly redistributed over the processes?
+    do i = 0, min(4, nb_proc-1)
+        error = test_part_AC_bufferToScalar_Deb(i)
+        call test_status(error, 'bufferToScalar deb, no shift, rank =', i, rank_world)
+        error = test_part_AC_bufferToScalar_Deb(i,1)
+        call test_status(error, 'bufferToScalar deb, shift; rank =', i, rank_world)
+    end do
+    error = test_part_AC_bufferToScalar()
+    call test_status(error, 'bufferToScalar without shift', rank_world)
+    error = test_part_AC_bufferToScalar(1)
+    call test_status(error, 'bufferToScalar with shift', rank_world)
+    error = test_part_AC_bufferToScalar(3*nb_proc_dim(2)+2)
+    call test_status(error, 'bufferToScalar with large shift', rank_world)
+
+    ! Test remeshing of untagged particles
+    error = test_part_remesh_no_tag()
+    call test_status(error, 'remeshing of cst field advected at cst v', rank_world)
+    error = test_part_remesh_no_tag('translation_field')
+    call test_status(error, 'remeshing of non-cst field advected at cst v', rank_world)
+    error = test_part_remesh_no_tag(order_opt='p_O4')
+    call test_status(error, 'O4: remeshing of cst field advected at cst v', rank_world)
+    error = test_part_remesh_no_tag('translation_field', order_opt='p_O4')
+    call test_status(error, 'O4: remeshing of non-cst field advected at cst v', rank_world)
+    error = test_part_remesh_no_tag(order_opt='p_M6')
+    call test_status(error, 'Mprime 6: remeshing of cst field advected at cst v', rank_world)
+    error = test_part_remesh_no_tag('translation_field', order_opt='p_M6')
+    call test_status(error, 'Mprime 6: remeshing of non-cst field advected at cst v', rank_world)
+
+
+    ! ===== Test about procedures involved in computation of particles advection =====
+    call mpi_barrier(MPI_COMM_WORLD, ierr)
+    call test_title('particle: velocity interpolation', rank_world)
+
+    ! Test auxiliary procedures
+    error = test_part_AC_obtain_recevers()
+    call test_status(error, 'AC_obtain_recevers', rank_world)
+
+    ! Test velocity interpolation
+    error = test_part_AC_interpol_velocity(2, 0)
+    call test_status(error, 'velocity interpolation, no shift', rank_world)
+    error = test_part_AC_interpol_velocity(2, 1)
+    call test_status(error, 'velocity interpolation, shift', rank_world)
+
+    ! Test group variants
+    call test_title('particle: velocity interpolation (group)', rank_world)
+    call set_group_size(5)
+    error = test_part_AC_velo_determine_com(0)
+    call test_status(error, 'velocity: communication', rank_world)
+    error = test_part_AC_velo_determine_com(1)
+    call test_status(error, 'velocity: communication, shift', rank_world)
+    do direction = 1, 3
+        error = test_part_AC_velo_compute_group(direction,0)
+        call test_status(error, 'velocity: interpol', direction, rank_world)
+    end do
+
+    ! ===== Test others =====
+    call test_title('particle method - tag and block type', rank_world)
+    call mpi_barrier(MPI_COMM_WORLD, ierr)
+    error = test_part_AC_type_and_block_O2()
+    call test_status(error, 'determine block type and tag particles', rank_world)
+    error = test_part_AC_type_and_block_O2_group()
+    call test_status(error, 'block and type for a group of lines', rank_world)
+
+    ! ===== Test solvers =====
+
+    ! Tests devoted to the solver based on the particle method
+    call test_title('particle method - advection test', rank_world)
+    error = test_part_advec_1D()
+    call test_status(error, 'advection - center block, no tag', rank_world)
+    error = test_part_advec_1D('left')
+    call test_status(error, 'advection - left block, no tag', rank_world)
+    verbose_more = .true.
+    error = test_part_advec_3D()
+    call test_status(error, 'advection - 3D tests', rank_world)
+    verbose_more = .false.
+
+    ! Idem for order 4 (corrected lambda 4)
+    call test_title('particle method - advection test', rank_world)
+    error = test_part_advec_1D(order_opt='p_O4')
+    call test_status(error, 'advection - center block, no tag', rank_world)
+    error = test_part_advec_1D('left', order_opt='p_O4')
+    call test_status(error, 'advection - left block, no tag', rank_world)
+    verbose_more = .true.
+    error = test_part_advec_3D(order='p_O4')
+    call test_status(error, 'advection - 3D tests', rank_world)
+    verbose_more = .false.
+
+    ! Idem for order M'6 remeshing formula
+    call test_title('particle method - advection test', rank_world)
+    error = test_part_advec_1D(order_opt='p_M6')
+    call test_status(error, 'advection - center block, no tag', rank_world)
+    error = test_part_advec_1D('left', order_opt='p_M6')
+    call test_status(error, 'advection - left block, no tag', rank_world)
+    verbose_more = .true.
+    error = test_part_advec_3D(order='p_M6')
+    call test_status(error, 'advection - 3D tests', rank_world)
+    verbose_more = .false.
+
+    ! Generic test
+!    call mpi_barrier(MPI_COMM_WORLD, ierr)
+!    call test_title('generic advection test', rank_world)
+!    error = test_advec_rot()
+!    call test_status(error, 'turning sphere', rank_world)
+
+
+    call mpi_finalize(ierr)
+
+end program advec_main
+
+!> @}
diff --git a/HySoP/src/Unstable/LEGI/test/src/Test_io/io_aux.f90 b/HySoP/src/Unstable/LEGI/test/src/Test_io/io_aux.f90
new file mode 100644
index 0000000000000000000000000000000000000000..12bfa054bb70c8f3a23f84cc798522c0e77bcf7b
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/test/src/Test_io/io_aux.f90
@@ -0,0 +1,98 @@
+!------------------------------------------------------------------------------
+!
+! MODULE: io_aux
+!
+! DESCRIPTION: 
+!> This file contains pass/fail tests of the io procedures.
+!!
+!! @details
+!! All these tests are unit tests: they return a logical value to check if
+!! the code version passes them or not.
+!!
+!! That is, all these tests are logical functions: they return true if the
+!! result is the right one and false otherwise.
+!! This first version tests only the output in vtk xml format in a parallel context.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module io_aux
+
+    implicit none
+
+    public      :: test_io_output 
+
+
+contains
+
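+!> Test the parallel output in vtk xml format: write a constant field and
+!! fields varying linearly along X, Y and Z.
+!!    @return success = test error (= false if the code passes the test)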
+function test_io_output() result(success)
+    
+    ! External Library
+    use mpi
+    ! Scales code
+    use cart_topology
+    use cart_mesh
+    use parallel_io_bin
+    ! Test procedures
+    use test_common
+
+    implicit none
+
+    logical                                             :: success          ! logical result
+    real(WP), dimension(N_proc(1),N_proc(2),N_proc(3))  :: field            ! field used in this test
+    integer                                             :: tag_cst          ! tag for the constant field used in this test
+    integer                                             :: tagX,tagY,tagZ   ! tags for the non-constant fields used in this test
+    integer                                             :: i                ! loop index
+    
+    
+    success = .true.
+
+    ! Initialize output context
+    call parallel_io_init_all(4, nb_proc_dim, length, myrank, coord, './io_res/')
+    call test_substatus('general context initialization', success, myrank)
+
+    call parallel_io_init_field('cst', tag_cst)
+    call parallel_io_init_field('X', tagX)
+    call parallel_io_init_field('Y', tagY)
+    call parallel_io_init_field('Z', tagZ)
+    call test_substatus('fields context initialization', success, myrank)
+
+    ! Make some output
+    field = 1.0
+    call parallel_write(tag_cst, field, 'cst')
+    field = 2.0
+    call parallel_write(tag_cst, field, 'cst')
+    call test_substatus('constant output', success, myrank)
+
+    do i = 1, N_proc(1)
+        field(i,:,:) = i+coord(1)*N_proc(1)
+    end do
+    call parallel_write(tagX, field, 'X')
+    field = 2*field
+    call parallel_write(tagX, field, 'X')
+    call test_substatus('X output', success, myrank)
+
+    do i = 1, N_proc(2)
+        field(:,i,:) = i+coord(2)*N_proc(2)
+    end do
+    call parallel_write(tagY, field, 'Y')
+    call test_substatus('Y output', success, myrank)
+
+    do i = 1, N_proc(3)
+        field(:,:,i) = i+coord(3)*N_proc(3)
+    end do
+    call parallel_write(tagZ, field, 'Z')
+    call test_substatus('Z output', success, myrank)
+
+    
+    ! Free memory
+    call parallel_io_finish()
+
+    ! The main program expects to receive a signal corresponding to error rather than success
+    success = .not. success
+   
+end function test_io_output
+
+end module io_aux
diff --git a/HySoP/src/Unstable/LEGI/test/src/Test_io/io_main.f90 b/HySoP/src/Unstable/LEGI/test/src/Test_io/io_main.f90
new file mode 100644
index 0000000000000000000000000000000000000000..45c63c1ffb8ea73177415dd7e6b1d4138a39eade
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/test/src/Test_io/io_main.f90
@@ -0,0 +1,78 @@
+!------------------------------------------------------------------------------
+!
+! PROGRAM : io_main
+!
+! DESCRIPTION: 
+!> This program uses the functions implemented in the module io_aux to
+!! test the io routines.
+!!
+!! @details
+!! All these tests are unit tests: they return a logical value to check if
+!! the code version passes them or not.
+!!
+!! That is, all these tests are logical functions: they return true if the
+!! result is the right one and false otherwise.
+!! This first version tests only the output in vtk xml format in a parallel context.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+program io_main
+
+    ! External Library
+    use mpi
+    ! Scales code
+    use cart_topology
+    use parallel_io_bin
+    ! Test procedures
+    use test_common
+    use io_aux
+
+    implicit none
+
+    logical     :: error = .false.  ! logical error
+    integer     :: ierr             ! mpi error code
+    integer     :: rank_world       ! process rank on "MPI_COMM_WORLD"
+    integer     :: nb_proc,nb_procZ ! number of processes
+    integer     :: i                ! loop index
+   
+    ! ===== Initialisation =====
+
+    ! Set the verbosity
+    verbose_test = .true.
+    verbose_more = .true.
+    ! Initialise mpi
+    call mpi_init(ierr)    
+    call mpi_comm_rank(MPI_COMM_WORLD, rank_world, ierr)
+    call mpi_comm_size(MPI_COMM_WORLD, nb_proc, ierr)
+
+    ! Cut the domain along Y and initialize the topology
+    nb_procZ = 1
+    if ((mod(nb_proc,5)==0).and.(mod(100, nb_proc/5)==0)) then
+        nb_procZ = 5
+        nb_proc = nb_proc/5
+    else if ((mod(nb_proc,2)==0).and.(mod(100, nb_proc/2)==0)) then
+        nb_procZ = 2
+        nb_proc = nb_proc/2
+    else
+        if (mod(100, nb_proc)/=0) stop 'wrong number of processes: it has to divide 100'
+    end if
+    call cart_create((/ nb_proc, nb_procZ /), ierr)
+    call discretisation_default()
+    call mpi_barrier(MPI_COMM_WORLD, ierr)
+
+
+    ! ===== Test about io procedures =====
+    call mpi_barrier(MPI_COMM_WORLD, ierr)
+    call test_title('io - parallel vtk xml', rank_world)
+    error = test_io_output()
+    call test_status(error, 'write output', rank_world)
+    !error = test_io_input()
+    !call test_status(error, 'write input', rank_world)
+
+    call mpi_finalize(ierr)
+
+end program io_main
+
diff --git a/HySoP/src/Unstable/LEGI/test/src/Test_topo/topo_aux.f90 b/HySoP/src/Unstable/LEGI/test/src/Test_topo/topo_aux.f90
new file mode 100644
index 0000000000000000000000000000000000000000..a3e19682ae6e79ea9671d702aa371aae10a48a2c
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/test/src/Test_topo/topo_aux.f90
@@ -0,0 +1,274 @@
+!------------------------------------------------------------------------------
+!
+! MODULE: topo_aux
+!
+! DESCRIPTION: 
+!> This module provides different tests to validate the topology and the
+!! interface with the different data structures.
+!!
+!! @details
+!! Different automatic tests are developed in order to check the mesh creation
+!! and the interface between the two data structures (the one used for the
+!! particle method and the one from the spectral part).
+!! All these tests are unit tests: they return a logical value to check if
+!! the code version passes them or not.
+!!
+!! That is, all these tests are logical functions: they return true if the
+!! result is the right one and false otherwise.
+!!
+!! The following tests are included:
+!!      1 -> Initialise the topology, check the number of processes and
+!!              the communicators.
+!!      2 -> Check the periodicity.
+!!      3 -> Check that the subgrid on each process has the right size
+!
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module topo_aux
+
+    use mpi
+    use cart_topology
+    use string
+    use precision
+    implicit none
+
+    real(WP), private   :: epsilon_success = 1e-4     ! Error tolerance
+
+
+    ! Public procedures
+
+    ! ===== Test the topology =====
+    ! Public function
+    public              :: test_topo_init
+    public              :: test_topo_perio
+    public              :: test_topo_submesh
+
+
+
+
+contains
+
+!> Test the topology initialisation
+!!    @return success   = test error (= false if the code passes the test)
+!! @details
+!!    Test the cartesian topology: check the number of processes and the
+!!    communicators.
+function test_topo_init() result(success)
+
+    use test_common
+
+    logical     :: success          ! success status
+
+    integer     :: ierr             ! mpi error code
+    integer     :: nb_proc          ! total number of processes
+    integer     :: nb_Y, nb_Z       ! actual number of processes in each direction
+    integer, dimension(2)   :: dims ! wanted number of processes in the Y and Z directions
+
+    success = .true.
+    call mpi_comm_size(MPI_COMM_WORLD, nb_proc, ierr)
+
+    ! Cut the domain along Y and initialize the topology
+    dims = (/ nb_proc, 1 /)
+    call cart_create(dims, ierr)
+
+    ! Check the number of process in each communicator
+    call mpi_comm_size(Y_comm, nb_Y, ierr)
+    if (nb_Y /= nb_proc) then
+        call test_substatus('number of processes in Y_comm', nb_Y, myrank)
+        call test_substatus('and it must be', nb_proc, myrank)
+        success = .false.
+    end if
+    call mpi_barrier(MPI_COMM_WORLD, ierr)
+
+    call mpi_comm_size(Z_comm, nb_Z, ierr)
+    if (nb_Z /= 1) then
+        call test_substatus('number of processes in Z_comm', nb_Z, myrank)
+        call test_substatus('and it must be', 1, myrank)
+        success = .false.
+    end if
+    call mpi_barrier(MPI_COMM_WORLD, ierr)
+
+    ! Compare it with the one saved in cart_topo
+    if (nb_Y /= nb_proc_dim(2)) then
+        call test_substatus('number of processes in Y_comm', nb_Y, myrank)
+        call test_substatus('and the solver believes it is', nb_proc_dim(2), myrank)
+        success = .false.
+    end if
+    if (nb_Z /= nb_proc_dim(3)) then
+        call test_substatus('number of processes in Z_comm', nb_Z, myrank)
+        call test_substatus('and the solver believes it is', nb_proc_dim(3), myrank)
+        success = .false.
+    end if
+
+    ! Return error = not success
+    success = .not. success
+
+end function test_topo_init
+
+!> Check that the cartesian topology has the right structure and periodicity
+!!    @return success   = test error (= false if the code passes the test)
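+!! @details
+!!    For each direction, the neighbours returned by mpi_cart_shift are compared
+!!    with the coordinates expected from the periodicity (modulo the number of
+!!    processes in that direction), for positive, negative and large shifts.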
+function test_topo_perio() result(success)
+
+    use test_common
+
+    logical     :: success            ! success status
+    
+    integer     :: ierr                     ! mpi error code
+    integer     :: rankP, rankN             ! ranks of the previous and next processes (for shift)
+    integer     :: nb_Y, nb_Z               ! number of processes in each direction
+    integer, dimension(2)   :: dims         ! number of processes in the Y and Z directions
+    integer, dimension(3)   :: coord_bis    ! coordinates of another process
+    integer                 :: new_coord    ! theoretical coordinate
+
+    success = .true.
+    
+    ! Get the size
+    call mpi_comm_size(Y_comm, nb_Y, ierr)
+    call mpi_comm_size(Z_comm, nb_Z, ierr)
+
+    ! Shift along Y
+    ! Positive shift
+    call mpi_cart_shift(cart_comm, 2-1, 1, rankP, rankN, ierr)
+    call mpi_barrier(MPI_COMM_WORLD, ierr)
+    call mpi_cart_coords(cart_comm, rankP, 3, coord_bis, ierr)
+    new_coord = modulo(coord(2)-1, nb_Y)
+    if ((coord_bis(2) /=(new_coord)).OR.(coord_bis(3)/=coord(3)) ) then
+        call test_substatus('wrong Y-1 on rank', myrank, printer)
+        call test_substatus('theoretical Y-1', new_coord, printer)
+        call test_substatus('computed Y-1', coord_bis(2), printer)
+        call test_substatus('X', coord_bis(1), printer)
+        call test_substatus('Z', coord_bis(3), printer)
+        success = .false.
+    end if
+    call mpi_barrier(MPI_COMM_WORLD, ierr)
+    call mpi_cart_coords(cart_comm, rankN, 3, coord_bis, ierr)
+    new_coord = modulo(coord(2)+1, nb_Y)
+    if ((coord_bis(2) /=(new_coord)).OR.(coord_bis(3)/=coord(3)) ) then
+        call test_substatus('wrong Y+1 on rank', myrank, printer)
+        success = .false.
+    end if
+    ! Negative shift
+    call mpi_cart_shift(cart_comm, 2-1, -1, rankP, rankN, ierr)
+    call mpi_barrier(MPI_COMM_WORLD, ierr)
+    call mpi_cart_coords(cart_comm, rankN, 3, coord_bis, ierr)
+    new_coord = modulo(coord(2)-1, nb_Y)
+    if ((coord_bis(2) /=(new_coord)).OR.(coord_bis(3)/=coord(3)) ) then
+        call test_substatus('wrong Y+(-1) on rank', myrank, printer)
+        success = .false.
+    end if
+    call mpi_barrier(MPI_COMM_WORLD, ierr)
+    call mpi_cart_coords(cart_comm, rankP, 3, coord_bis, ierr)
+    new_coord = modulo(coord(2)+1, nb_Y)
+    if ((coord_bis(2) /=(new_coord)).OR.(coord_bis(3)/=coord(3)) ) then
+        call test_substatus('wrong Y-(-1) on rank', myrank, printer)
+        success = .false.
+    end if
+    call mpi_barrier(MPI_COMM_WORLD, ierr)
+    call test_substatus('topo and periodicity along Y', success, myrank)
+    
+    ! Shift along Z
+    ! Positive shift
+    call mpi_cart_shift(cart_comm, 3-1, 1, rankP, rankN, ierr)
+    call mpi_barrier(MPI_COMM_WORLD, ierr)
+    call mpi_cart_coords(cart_comm, rankP, 3, coord_bis, ierr)
+    new_coord = modulo(coord(3)-1, nb_Z)
+    if ((coord_bis(3) /=(new_coord)).OR.(coord_bis(2)/=coord(2)) ) then
+        call test_substatus('wrong Z-1 on rank', myrank, printer)
+        success = .false.
+    end if
+    call mpi_barrier(MPI_COMM_WORLD, ierr)
+    call mpi_cart_coords(cart_comm, rankN, 3, coord_bis, ierr)
+    new_coord = modulo(coord(3)+1, nb_Z)
+    if ((coord_bis(3) /=(new_coord)).OR.(coord_bis(2)/=coord(2)) ) then
+        call test_substatus('wrong Z+1 on rank', myrank, printer)
+        success = .false.
+    end if
+    ! Negative shift
+    call mpi_cart_shift(cart_comm, 3-1, -1, rankP, rankN, ierr)
+    call mpi_barrier(MPI_COMM_WORLD, ierr)
+    call mpi_cart_coords(cart_comm, rankN, 3, coord_bis, ierr)
+    new_coord = modulo(coord(3)-1, nb_Z)
+    if ((coord_bis(3) /=(new_coord)).OR.(coord_bis(2)/=coord(2)) ) then
+        call test_substatus('wrong Z+(-1) on rank', myrank, printer)
+        success = .false.
+    end if
+    call mpi_barrier(MPI_COMM_WORLD, ierr)
+    call mpi_cart_coords(cart_comm, rankP, 3, coord_bis, ierr)
+    new_coord = modulo(coord(3)+1, nb_Z)
+    if ((coord_bis(3) /=(new_coord)).OR.(coord_bis(2)/=coord(2)) ) then
+        call test_substatus('wrong Z-(-1) on rank', myrank, printer)
+        success = .false.
+    end if
+    call mpi_barrier(MPI_COMM_WORLD, ierr)
+    call test_substatus('topo and periodicity along Z', success, myrank)
+
+    ! Big shift: a displacement of 1+2*nb_Y must wrap around the periodic
+    ! topology and give the same neighbours as a displacement of 1
+    call mpi_cart_coords(cart_comm, myrank, 3, coord, ierr)
+    call mpi_cart_shift(cart_comm, 2-1, 1+2*nb_Y, rankP, rankN, ierr)
+    call mpi_barrier(MPI_COMM_WORLD, ierr)
+    call mpi_cart_coords(cart_comm, rankP, 3, coord_bis, ierr)
+    new_coord = modulo(coord(2)-1, nb_Y)
+    if ((coord_bis(2) /=(new_coord)).OR.(coord_bis(3)/=coord(3)) ) then
+        call test_substatus('wrong Y- on rank', myrank, printer)
+        call test_substatus('theoretical Y-', new_coord, printer)
+        call test_substatus('computed Y-', coord_bis(2), printer)
+        success = .false.
+    end if
+    call mpi_barrier(MPI_COMM_WORLD, ierr)
+    call mpi_cart_coords(cart_comm, rankN, 3, coord_bis, ierr)
+    new_coord = modulo(coord(2)+1, nb_Y)
+    if ((coord_bis(2) /=(new_coord)).OR.(coord_bis(3)/=coord(3)) ) then
+        call test_substatus('wrong Y+ on rank', myrank, printer)
+        success = .false.
+    end if
+    call mpi_barrier(MPI_COMM_WORLD, ierr)
+    call test_substatus('huge shift along Y', success, myrank)
+
+    ! Return error = not success
+    success = .not.success
+
+end function test_topo_perio
+
+
+!> Test the construction of the subdomains and the mesh size on each process
+!!    @return success   = error flag (= .false. if the test passes)
+!! @details
+!!    Check that the subgrid on each process has the right size
+function test_topo_submesh() result(success)
+
+    use test_common
+
+    logical :: success
+    integer :: ierr ! mpi success code
+
+    success = .true.
+
+    call discretisation_default()
+    
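+    ! For instance, assuming the default discretisation is a 100^3 grid and,
+    ! say, 8 processes split as nb_proc_dim = (/1, 2, 4/), the expected local
+    ! sizes are N_proc = (/100, 50, 25/).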
+    ! Check the local number of mesh points
+    if (N_proc(1)/= 100) then
+        call test_substatus('local number of mesh points along X', N_proc(1), myrank)
+        success = .false.
+    end if
+    if (N_proc(2)/= 100/nb_proc_dim(2)) then
+        call test_substatus('local number of mesh points along Y', N_proc(2), myrank)
+        success = .false.
+    end if
+    if (N_proc(3)/= 100/nb_proc_dim(3)) then
+        call test_substatus('local number of mesh points along Z', N_proc(3), myrank)
+        success = .false.
+    end if
+
+    ! Return error = not success
+    success = .not.success
+
+    call mpi_barrier(MPI_COMM_WORLD, ierr)
+
+end function test_topo_submesh
+
+
+end module topo_aux
diff --git a/HySoP/src/Unstable/LEGI/test/src/Test_topo/topo_aux_interface.f90 b/HySoP/src/Unstable/LEGI/test/src/Test_topo/topo_aux_interface.f90
new file mode 100644
index 0000000000000000000000000000000000000000..a5e499172e377fad41383bcf3f7f5283c9b56f2d
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/test/src/Test_topo/topo_aux_interface.f90
@@ -0,0 +1,72 @@
+!------------------------------------------------------------------------------
+!
+! MODULE: topo_aux_interface
+! DESCRIPTION: 
+!> This module provides different tests to validate the interface between the different
+!! data structures.
+!!
+!! @details
+!! Different automatic tests are developed in order to check the interface between the
+!! two data structures (the one used for the particle method and the one from the
+!! spectral part). All these tests are unit tests: each returns a logical value telling
+!! whether the library passes it or not.
+!!
+!! That is, all these tests are logical functions: they return true if the result
+!! is the right one and false otherwise.
+!!
+!! The following tests are included:
+!!      1 -> Test the interface between the data structure used in the advection solver
+!!              based on the particle method and the one used in the
+!!              pseudo-spectral method.
+!!      X -> TODO check the output which could be done directly from this
+!!          format.
+!
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module topo_aux_interface
+
+    use cart_topology
+    use precision
+
+    implicit none
+
+    ! ===== Test the topology =====
+    ! Public function
+    public              :: test_topo_coupling
+
+contains
+
+!> Test the communication between the pseudo-spectral and particle methods.
+!!    @return success   = error flag (= .false. if the test passes)
+!! @details
+!!        Some advection-diffusion laws on scalar fields are solved by a mixed numerical
+!!    method. In such a case, the advection part is solved with a particle
+!!    solver (of order 2 in time and of order 2 or 4 in space) and the diffusion part
+!!    is computed with a pseudo-spectral solver. The operator splitting is a
+!!    Strang splitting. All these methods are provided in a parallel
+!!    implementation. The data distribution used in the pseudo-spectral solver is
+!!    described in the module "datalayout" and the one used in the particle
+!!    solver is described in cart_topology. Of course, to avoid useless
+!!    communication and to ensure an efficient implementation, these data distributions
+!!    are supposed to be the same for the storage of fields in real space
+!!    described in "datalayout" and the one described in "cart_topology". This
+!!    function is provided to test it and check whether the procedures provided by the
+!!    different solvers are compatible.
+function test_topo_coupling() result(success)
+
+    use cart_topology
+
+    logical     :: success
+
+    success = .true.
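+
+    ! TODO: the actual comparison between the "datalayout" distribution and the
+    ! cart_topology one is not implemented yet. A minimal sketch of the check
+    ! (hypothetical name: a "spectral_local_resolution" accessor on the
+    ! datalayout side is an assumption):
+    !     if (any(N_proc /= spectral_local_resolution())) success = .false.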
+
+
+    success = .not. success
+
+end function test_topo_coupling
+
+end module topo_aux_interface
diff --git a/HySoP/src/Unstable/LEGI/test/src/Test_topo/topo_main.f90 b/HySoP/src/Unstable/LEGI/test/src/Test_topo/topo_main.f90
new file mode 100644
index 0000000000000000000000000000000000000000..440ad2b60d59c4721743b96538e47a7a2f430c9e
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/test/src/Test_topo/topo_main.f90
@@ -0,0 +1,60 @@
+!------------------------------------------------------------------------------
+!
+! PROGRAM : topo_main
+!
+! DESCRIPTION: 
+!> Test the cartesian topology and all the associated variables.
+!! This program performs all the tests included in "topo_aux". That module provides
+!! unit tests, ie logical functions which return a logical error.
+!! There is a verbosity parameter to decide whether to print the status of
+!! each test (and sub-test) on screen or not.
+!!
+!! See topo_aux for a list of available tests.
+!!
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+program topo_main
+
+    use mpi
+    use topo_aux
+    use topo_aux_interface
+    use test_common
+
+    implicit none
+
+    logical     :: error = .true.   ! logical error
+    integer     :: ierr             ! mpi error code
+    integer     :: rank_world       ! process rank in "MPI_COMM_WORLD"
+    integer     :: nb_proc          ! number of processes
+    
+    ! Set the verbosity
+    verbose_test = .true.
+    verbose_more = .true.
+
+    ! Initialise mpi
+    call mpi_init(ierr)    
+    call mpi_comm_size(MPI_COMM_WORLD, nb_proc, ierr)
+    call mpi_comm_rank(MPI_COMM_WORLD, rank_world, ierr)
+    call mpi_test_substatus(ierr, error, 'mpi initialization', rank_world)
+
+    ! Initialize the topology and test it
+    error=test_topo_init()
+    call test_status(error, '(mpi) topology initialisation', rank_world)
+
+    ! Test the periodicity
+    error=test_topo_perio()
+    call test_status(error, 'periodicity', rank_world)
+
+    ! Test the subdomain sizes
+    error=test_topo_submesh()
+    call test_status(error, 'subdomain size', rank_world)
+
+    call mpi_finalize(ierr)
+
+end program topo_main
+
diff --git a/HySoP/src/Unstable/LEGI/test/src/test_common.f90 b/HySoP/src/Unstable/LEGI/test/src/test_common.f90
new file mode 100644
index 0000000000000000000000000000000000000000..f2ffd7678ffe201dff3f47f0ea69f3756787213c
--- /dev/null
+++ b/HySoP/src/Unstable/LEGI/test/src/test_common.f90
@@ -0,0 +1,536 @@
+!------------------------------------------------------------------------------
+!
+! MODULE: test_common
+!
+! DESCRIPTION: 
+!> This module provides different tools useful to perform tests.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module test_common
+
+    use string
+    use precision
+    
+    implicit none
+    
+    ! ===== Public variables =====
+    !> To print some status messages during the tests
+    logical :: verbose_test = .true.
+    !> More verbosity !
+    logical :: verbose_more = .true.
+    !> To choose which process leads the screen output
+    integer :: printer = 0
+
+    ! ===== Public procedure =====
+    ! - To print some information about the test (verbosity case)
+    public  :: test_title
+    public  :: test_status
+    public  :: mpi_test_substatus
+    public  :: test_substatus
+    public  :: test_check_success
+
+    ! ===== Private procedure =====
+    private :: test_status_M
+    private :: test_status_MI
+    private :: test_substatus_M
+    private :: test_substatus_MI
+    private :: test_substatus_MR
+    private :: test_substatus_ML
+    private :: test_substatus_M3I
+    private :: test_check_success_S
+    private :: test_check_success_F
+    private :: test_check_success_F2
+    private :: test_check_success_F3
+    private :: test_check_success_FI
+    private :: test_check_success_F2I
+    private :: test_check_success_FL
+    private :: test_check_success_F3L
+
+    
+    ! ===== Private variables =====
+    !> Error tolerance
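+    !! (a check passes when \f$ \Vert computed - expected \Vert_\infty < \epsilon \f$)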
+    real(WP), private   :: epsilon_success = 1e-4_WP
+
+    ! ===== Interface =====
+    interface test_status
+        module procedure test_status_M, test_status_MI
+    end interface test_status
+
+    interface test_substatus
+        module procedure test_substatus_M, test_substatus_MI, test_substatus_MR &
+            & , test_substatus_ML, test_substatus_M3I
+    end interface test_substatus
+
+    interface test_check_success
+        module procedure test_check_success_S, test_check_success_F, &
+            & test_check_success_F2, test_check_success_F3, test_check_success_FI, &
+            & test_check_success_F2I, test_check_success_FL, test_check_success_F3L
+    end interface test_check_success
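+
+    ! The generic names dispatch on the argument types; for instance (a sketch
+    ! using calls that appear in the test modules):
+    !     call test_substatus('local number of mesh points along X', N_proc(1), myrank) ! message + integer
+    !     call test_substatus('norm inf of error', success_inf_gl, rank)                ! message + real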
+
+
+
+contains
+
+!> Print a test title
+!!  @param[in]      message = information message
+!!  @param[in]      rank    = mpi rank, to avoid printing the message from every process (useful if there are a lot of them)
+subroutine test_title(message, rank)
+
+    use cart_topology
+
+    character(len =*), intent(in)   :: message
+    integer, intent(in)             :: rank
+
+    character(len=40)               :: mess_bis         ! message copy
+
+
+    if((verbose_test).and.(rank==printer)) then
+        mess_bis = message
+        write(*,'(A1,1X,A40)')'#', mess_bis
+        if((verbose_more).and.(rank==printer)) print*,''
+    end if
+
+end subroutine test_title
+
+
+!> Diffuse the error status among the processes and print the test status
+!!  @param[in, out] error   = logical equal to true if there is an error
+!!  @param[in]      message = information message
+!!  @param[in]      rank    = mpi rank, to avoid printing the message from every process (useful if there are a lot of them)
+subroutine test_status_M(error, message, rank)
+
+    use mpi
+    use cart_topology
+
+    logical, intent(inout)             :: error
+    character(len =*), intent(in)   :: message
+    integer, intent(in)             :: rank
+
+    character(len=40)               :: mess_bis         ! message copy
+    integer                         :: error_int        ! do not initialize in the declaration:
+    integer                         :: error_red        ! an initialized local would get the save attribute
+    integer                         :: ierr             ! mpi error code
+
+    error_int = 0
+    error_red = 0
+    if(error) error_int = 1
+    call mpi_allreduce(error_int, error_red, 1, MPI_INTEGER, MPI_MAX, MPI_COMM_WORLD, ierr)
+    if(error_red==1) error=.true.
+
+    if((verbose_test).and.(rank==printer)) then
+        mess_bis = message
+        write(*,'(5X,A2,2X,A40,1X,A2,L2)')'->', mess_bis, '=', .not.error
+        if((verbose_more).and.(rank==printer)) print*,''
+    end if
+
+end subroutine test_status_M
+
+
+!> Diffuse the error status among the processes and print the test status
+!!  @param[in, out] error           = logical equal to true if there is an error
+!!  @param[in]      message         = information message
+!!  @param[in]      message_int     = integer added to the information message
+!!  @param[in]      rank            = mpi rank, to avoid printing the message from every process (useful if there are a lot of them)
+subroutine test_status_MI(error, message, message_int , rank)
+
+    use mpi
+    use cart_topology
+
+    logical, intent(inout)          :: error
+    character(len =*), intent(in)   :: message
+    integer, intent(in)             :: rank, message_int
+
+    character(len=37)               :: mess_bis         ! message copy
+    integer                         :: error_int        ! do not initialize in the declaration:
+    integer                         :: error_red        ! an initialized local would get the save attribute
+    integer                         :: ierr             ! mpi error code
+
+    error_int = 0
+    error_red = 0
+    if(error) error_int = 1
+    call mpi_allreduce(error_int, error_red, 1, MPI_INTEGER, MPI_MAX, MPI_COMM_WORLD, ierr)
+    if(error_red==1) error=.true.
+
+    if((verbose_test).and.(rank==printer)) then
+        mess_bis = message
+        write(*,'(5X,A2,2X,A37,1X,I2,1X,A2,L2)')'->', mess_bis, message_int, '=', .not.error
+        if((verbose_more).and.(rank==printer)) print*,''
+    end if
+
+end subroutine test_status_MI
+
+
+!> Use an mpi error code to update the test status and print it
+!!  @param[in]      ierr    = mpi error code
+!!  @param[in,out]  error   = success flag on entry, error flag on exit
+!!  @param[in]      message = information message
+!!  @param[in]      rank    = mpi rank, to avoid printing the message from every process (useful if there are a lot of them)
+subroutine mpi_test_substatus(ierr, error, message, rank)
+
+    use mpi
+
+    integer, intent(in)             :: ierr
+    logical, intent(inout)          :: error
+    character(len =*), intent(in)   :: message
+    integer, intent(in)             :: rank
+
+    ! on entry "error" holds a success flag; it is turned into an error flag below
+    if (ierr /= MPI_SUCCESS) error = .false.
+
+    call test_substatus(message, error, rank)
+    error = .not. error
+
+end subroutine mpi_test_substatus
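+
+! Typical call sequence (as in topo_main): fold an MPI return code into the
+! running status and print it:
+!     call mpi_init(ierr)
+!     call mpi_test_substatus(ierr, error, 'mpi initialization', rank_world)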
+
+!> Print a sub-status message
+!!  @param[in]  message = information message
+!!  @param[in]  rank    = mpi rank, to avoid printing the message from every process (useful if there are a lot of them)
+subroutine test_substatus_M(message, rank)
+
+    character(len =*), intent(in)  :: message
+    integer, intent(in)             :: rank
+
+    if((verbose_more).and.(rank==printer)) then
+        write(*,'(10X,A2,2X,A40)')'+', message
+    end if
+
+end subroutine test_substatus_M
+
+
+!> Print a sub-status message and an integer
+!!  @param[in]  message = information message
+!!  @param[in]  i       = integer to print
+!!  @param[in]  rank    = mpi rank, to avoid printing the message from every process (useful if there are a lot of them)
+subroutine test_substatus_MI(message, i, rank)
+
+    character(len =*), intent(in)  :: message
+    integer, intent(in)             :: i
+    integer, intent(in)             :: rank
+
+    if((verbose_more).and.(rank==printer)) then
+        write(*,'(10X,A2,2X,A40,1X,A1,1X,I5)')'+', message, '=', i
+    end if
+
+end subroutine test_substatus_MI
+
+
+!> Print a sub-status message and an integer array
+!!  @param[in]  message = information message
+!!  @param[in]  i       = integer array of dimension 3 to print
+!!  @param[in]  rank    = mpi rank, to avoid printing the message from every process (useful if there are a lot of them)
+subroutine test_substatus_M3I(message, i, rank)
+
+    character(len =*), intent(in)     :: message
+    integer, dimension(3), intent(in) :: i
+    integer, intent(in)               :: rank
+
+    if((verbose_more).and.(rank==printer)) then
+        write(*,'(10X,A2,2X,A40,1X,A1,1X,I3,1X,A1,1X,I3,1X,A1,1X,I3)')'+',message,'=',i(1),',',i(2),',',i(3)
+    end if
+
+end subroutine test_substatus_M3I
+
+
+!> Print a sub-status message and a real
+!!  @param[in]  message = information message
+!!  @param[in]  r       = real to print
+!!  @param[in]  rank    = mpi rank, to avoid printing the message from every process (useful if there are a lot of them)
+subroutine test_substatus_MR(message, r, rank)
+
+    use precision
+
+    character(len=* ), intent(in)   :: message
+    real(WP), intent(in)            :: r
+    integer, intent(in)             :: rank
+
+    if((verbose_more).and.(rank==printer)) then
+        write(*,'(10X,A2,2X,A40,1X,A1,1X,F8.5)')'+', message, '=', r
+    end if
+
+end subroutine test_substatus_MR
+
+
+!> Print a sub-status message and a logical (after a global reduction of its value across processes)
+!!  @param[in]      message = information message
+!!  @param[in,out]  l       = logical to print
+!!  @param[in]      rank    = mpi rank, to avoid printing the message from every process (useful if there are a lot of them)
+subroutine test_substatus_ML(message, l, rank)
+
+    use precision
+    use mpi
+    use cart_topology
+
+    character(len =*), intent(in)   :: message
+    logical, intent(inout)          :: l
+    integer, intent(in)             :: rank
+    integer                         :: error_int        ! do not initialize in the declaration:
+    integer                         :: error_red        ! an initialized local would get the save attribute
+    integer                         :: ierr             ! mpi error code
+
+    error_int = 0
+    error_red = 0
+    if(.not. l) error_int = 1
+    call mpi_allreduce(error_int, error_red, 1, MPI_INTEGER, MPI_MAX, MPI_COMM_WORLD, ierr)
+    if(error_red==1) l=.false.
+
+    if((verbose_more).and.(rank==printer)) then
+        write(*,'(10X,A2,2X,A40,1X,A1,1X,L5)')'+', message, '=', l
+    end if
+
+end subroutine test_substatus_ML
+
+
+
+!> Check if the numerical error stays under a threshold - constant theoretical
+!! solution
+!!     @param[in,out]   success     = test status (set to false if the check fails)
+!!     @param[in]       scal1D      = numerical value of the scalar (1D)
+!!     @param[in]       good_scal   = theoretical value of the scalar
+!!     @param[in]       rank        = mpi rank, to avoid printing the message from every process (useful if there are a lot of them)
+subroutine test_check_success_S(scal1D, good_scal, success, rank)
+
+    use precision
+    use cart_topology
+
+    real(WP), intent(in)                        :: good_scal        ! theoretical value of scal1D
+    real(WP), dimension(:),intent(in)           :: scal1D           ! the computed scalar field
+    logical, intent(inout)                      :: success
+    integer, intent(in)                         :: rank
+
+    real(WP)                                    :: success_inf        ! norm L_inf of the error
+
+    success_inf = maxval(abs(scal1D - good_scal))
+    if (success_inf>=epsilon_success) then
+        success = .false.
+        call test_substatus('XXX error', rank)
+        call test_substatus('max scal1D', maxval(scal1D), rank)
+        call test_substatus('min scal1D', minval(scal1D), rank)
+        call test_substatus('and it must be', good_scal, rank)
+    end if
+
+end subroutine test_check_success_S
+
+
+!> Check if two integer 1-dimensional arrays are equal.
+!!     @param[in,out]   success     = test status (set to false if the check fails)
+!!     @param[in]       scal1D      = computed integer values (1D)
+!!     @param[in]       good_scal   = theoretical values
+!!     @param[in]       rank        = mpi rank, to avoid printing the message from every process (useful if there are a lot of them)
+subroutine test_check_success_FI(scal1D, good_scal, success, rank)
+
+    use precision
+    use cart_topology
+
+    integer, dimension(:),intent(in)            :: good_scal        ! theoretical value of scal1D
+    integer, dimension(:),intent(in)            :: scal1D           ! the computed scalar field
+    logical, intent(inout)                      :: success
+    integer, intent(in)                         :: rank
+
+    integer                                     :: success_inf        ! norm L_inf of the error
+
+    success_inf = maxval(abs(scal1D - good_scal))
+
+    if (success_inf>=epsilon_success) then 
+        success = .false.
+        call test_substatus('XXX error', rank)
+        call test_substatus('max scal1D', maxval(scal1D), rank)
+        call test_substatus('min scal1D', minval(scal1D), rank)
+        call test_substatus('max solution', maxval(good_scal), rank)
+    end if
+
+end subroutine test_check_success_FI
+
+
+!> Check if two integer 2-dimensional arrays are equal.
+!!     @param[in,out]   success     = test status (set to false if the check fails)
+!!     @param[in]       array2D     = computed integer values (2D)
+!!     @param[in]       good_array  = theoretical values
+!!     @param[in]       rank        = mpi rank, to avoid printing the message from every process (useful if there are a lot of them)
+subroutine test_check_success_F2I(array2D, good_array, success, rank)
+
+    use precision
+    use cart_topology
+
+    integer, dimension(:,:), intent(in)         :: good_array       ! theoretical value of array2D
+    integer, dimension(:,:), intent(in)         :: array2D          ! the computed value
+    logical, intent(inout)                      :: success
+    integer, intent(in)                         :: rank
+
+    integer                                     :: success_inf        ! norm L_inf of the error
+
+    success_inf = maxval(abs(array2D - good_array))
+
+    if (success_inf>=0.5) then ! As we consider integer, this error is enough
+        success = .false.
+        call test_substatus('XXX error', rank)
+        call test_substatus('max array2D', maxval(array2D), rank)
+        call test_substatus('min array2D', minval(array2D), rank)
+        call test_substatus('max solution', maxval(good_array), rank)
+    end if
+
+end subroutine test_check_success_F2I
+
+
+!> Check if the numerical error stays under a threshold - 1D space-dependent analytic solution
+!!     @param[in,out]   success     = test status (set to false if the check fails)
+!!     @param[in]       scal1D      = numerical value of the scalar (1D)
+!!     @param[in]       good_scal   = theoretical value of the scalar
+!!     @param[in]       rank        = mpi rank, to avoid printing the message from every process (useful if there are a lot of them)
+subroutine test_check_success_F(scal1D, good_scal, success, rank)
+
+    use precision
+    use cart_topology
+
+    real(WP), dimension(:),intent(in)           :: good_scal        ! theoretical value of scal1D
+    real(WP), dimension(:),intent(in)           :: scal1D           ! the computed scalar field
+    logical, intent(inout)                      :: success
+    integer, intent(in)                         :: rank
+
+    real(WP)                                    :: success_inf        ! norm L_inf of the error
+
+    success_inf = maxval(abs(scal1D - good_scal))
+
+    if (success_inf>=epsilon_success) then 
+        success = .false.
+        call test_substatus('XXX error', rank)
+        call test_substatus('max scal1D', maxval(scal1D), rank)
+        call test_substatus('min scal1D', minval(scal1D), rank)
+        call test_substatus('max solution', maxval(good_scal), rank)
+        call test_substatus('min solution', minval(good_scal), rank)
+    end if
+
+end subroutine test_check_success_F
+
+
+!> Check if the numerical error stays under a threshold - 2D space-dependent analytic solution
+!!     @param[in,out]   success     = test status (set to false if the check fails)
+!!     @param[in]       scal2D      = numerical value of the scalar (2D)
+!!     @param[in]       good_scal   = theoretical value of the scalar
+!!     @param[in]       rank        = mpi rank, to avoid printing the message from every process (useful if there are a lot of them)
+subroutine test_check_success_F2(scal2D, good_scal, success, rank)
+
+    use precision
+    use cart_topology
+
+    real(WP), dimension(:,:),intent(in)         :: good_scal        ! theoretical value of scal2D
+    real(WP), dimension(:,:),intent(in)         :: scal2D           ! the computed scalar field
+    logical, intent(inout)                      :: success
+    integer, intent(in)                         :: rank
+
+    real(WP)                                    :: success_inf        ! norm L_inf of the error
+
+    success_inf = maxval(abs(scal2D - good_scal))
+    if (success_inf>=epsilon_success) then 
+        success = .false.
+        call test_substatus('XXX error', rank)
+        call test_substatus('max scal2D', maxval(scal2D), rank)
+        call test_substatus('min scal2D', minval(scal2D), rank)
+        call test_substatus('max solution', maxval(good_scal), rank)
+        call test_substatus('min solution', minval(good_scal), rank)
+    end if
+
+end subroutine test_check_success_F2
+
+
+!> Check if the numerical error stays under a threshold - 3D space-dependent analytic solution
+!!     @param[in,out]   success     = test status (set to false if the check fails)
+!!     @param[in]       scal3D      = numerical value of the scalar (3D)
+!!     @param[in]       good_scal   = theoretical value of the scalar
+!!     @param[in]       rank        = mpi rank, to avoid printing the message from every process (useful if there are a lot of them)
+subroutine test_check_success_F3(scal3D, good_scal, success, rank)
+
+    use precision
+    use cart_topology
+    use mpi
+
+    real(WP), dimension(:,:,:),intent(in)   :: good_scal        ! theoretical value of scal3D
+    real(WP), dimension(:,:,:),intent(in)   :: scal3D           ! the computed scalar field
+    logical, intent(inout)                  :: success
+    integer, intent(in)                     :: rank
+
+    real(WP)                                :: success_inf      ! local norm L_inf of the error
+    real(WP)                                :: success_inf_gl   ! global norm L_inf of the error (meaningful on the root rank only)
+    integer                                 :: ierr             ! mpi error code
+
+    integer, dimension(3)                   :: temp             ! location of the extremal error
+
+    success_inf = maxval(abs(scal3D - good_scal))
+    success_inf_gl=success_inf
+    call mpi_reduce(success_inf, success_inf_gl, 1, MPI_DOUBLE_PRECISION, MPI_MAX, 0, MPI_COMM_WORLD, ierr)
+    if (success_inf_gl>=epsilon_success) success = .false.
+    call test_substatus('norm inf of error', success_inf_gl, rank)
+
+    if (success_inf>=epsilon_success) then 
+        call test_substatus('XXX error', rank)
+        temp = minloc(scal3D - good_scal)
+        call test_substatus('error min in', temp, rank)
+        call test_substatus('scal3D', scal3D(temp(1), temp(2), temp(3)), rank)
+        call test_substatus('sol', good_scal(temp(1), temp(2), temp(3)), rank)
+        temp = maxloc(scal3D - good_scal)
+        call test_substatus('error max in', temp, rank)
+        call test_substatus('scal3D', scal3D(temp(1), temp(2), temp(3)), rank)
+        call test_substatus('sol', good_scal(temp(1), temp(2), temp(3)), rank)
+    end if
+
+end subroutine test_check_success_F3
+
+
+!> Check if two 1D logical fields are identical or not
+!!     @param[in,out]   success     = test status (set to false if the check fails)
+!!     @param[in]       scal3D      = computed values of the logical field (1D)
+!!     @param[in]       good_scal   = theoretical values
+!!     @param[in]       rank        = mpi rank, to avoid printing the message from every process (useful if there are a lot of them)
+subroutine test_check_success_FL(scal3D, good_scal, success, rank)
+
+    use precision
+    use cart_topology
+    use mpi
+
+    logical, dimension(:),intent(in)        :: good_scal        ! theoretical values
+    logical, dimension(:),intent(in)        :: scal3D           ! the computed scalar field
+    logical, intent(inout)                  :: success
+    integer, intent(in)                     :: rank
+
+    logical                                     :: success_inf      ! local identity flag
+    logical                                     :: success_inf_gl   ! global identity flag (meaningful on the root rank only)
+    integer                                     :: ierr             ! mpi error code
+
+
+    success_inf = all(scal3D .eqv. good_scal)
+    success_inf_gl=success_inf
+    call mpi_reduce(success_inf, success_inf_gl, 1, MPI_LOGICAL, MPI_LAND, 0, MPI_COMM_WORLD, ierr)
+
+    success = success_inf_gl
+
+end subroutine test_check_success_FL
+
+
+!> Check if two 3D logical fields are identical or not
+!!     @param[in,out]   success     = test status (set to false if the check fails)
+!!     @param[in]       scal3D      = computed values of the logical field (3D)
+!!     @param[in]       good_scal   = theoretical values
+!!     @param[in]       rank        = mpi rank, to avoid printing the message from every process (useful if there are a lot of them)
+subroutine test_check_success_F3L(scal3D, good_scal, success, rank)
+
+    use precision
+    use cart_topology
+    use mpi
+
+    logical, dimension(:,:,:),intent(in)        :: good_scal        ! theoretical values
+    logical, dimension(:,:,:),intent(in)        :: scal3D           ! the computed scalar field
+    logical, intent(inout)                      :: success
+    integer, intent(in)                         :: rank
+
+    logical                                     :: success_inf      ! local identity flag
+    logical                                     :: success_inf_gl   ! global identity flag (meaningful on the root rank only)
+    integer                                     :: ierr             ! mpi error code
+
+
+    success_inf = all(scal3D .eqv. good_scal)
+    success_inf_gl=success_inf
+    call mpi_reduce(success_inf, success_inf_gl, 1, MPI_LOGICAL, MPI_LAND, 0, MPI_COMM_WORLD, ierr)
+
+    success = success_inf_gl
+
+end subroutine test_check_success_F3L
+
+
+end module test_common
diff --git a/HySoP/src/Unstable/Plouhmans.f90 b/HySoP/src/Unstable/Plouhmans.f90
new file mode 100644
index 0000000000000000000000000000000000000000..30be1c63869d97b8cbfac1b33ef429573046142d
--- /dev/null
+++ b/HySoP/src/Unstable/Plouhmans.f90
@@ -0,0 +1,132 @@
+!> Temporary module for ppm_client
+module ppmExample
+
+  use ppm_module_init
+  use ppm_module_data, only : ppm_kind_double
+  use ppm_module_finalize
+    
+  !  use client_io
+  use client_data, only: mk, dime
+  ! Physical domain and grid
+  use Domain
+  ! Fields on the grid
+  use Fields, only: init_fields, velocity, vorticity
+  ! Topology
+  use client_topology, only: init_topo,  topo
+  ! Multigrid solver
+  !use Solver, only : init_multigrid, solve_poisson
+  
+  use mpi
+  use WrapFort
+
+  implicit none
+
+  include "ppm_numerics.h"
+
+  integer, private :: info 
+
+contains
+
+  subroutine init_client()
+
+    integer :: prec,tol
+    ! MPI comm
+    integer :: comm
+    ! debug mode
+    integer :: debug
+    ! error status
+    integer :: info
+    !======================
+    ! Init ppm 
+    !======================
+    prec = ppm_kind_double ! Defined in ppm_param.h
+    comm = MPI_COMM_WORLD
+    debug = 2
+    tol = -10
+    info = -1
+    call ppm_init(dime,prec,tol,comm,debug,info)
+    
+    !======================
+    ! Read and broadcast 
+    ! some parameters 
+    !======================
+    ! call read_data()
+
+    !======================
+    ! Geometry and grid
+    !======================
+    call init_geometry()
+    call init_grid()
+    
+    !======================
+    ! Creates the topology
+    !======================
+    call init_topo(domain_minCoords, domain_maxCoords, domain_bc, domain_ghostsize, grid_resolution)
+    
+    !======================
+    ! Fields allocation
+    !======================
+    call init_fields(domain_ghostsize, topo)
+    
+    !======================
+    ! Init solver
+    !======================
+!    call init_multigrid(topo%ID, mesh%ID, domain_ghostsize, domain_bc)
+
+    !======================
+    ! Init Physics
+    !======================
+    velocity = 0.0_mk
+    vorticity = 0.0_mk
+
+    !======================
+    ! Init Particles
+    !======================
+     
+    print *, "end of parmes:ppm:init_client"
+
+  end subroutine init_client
+
+  subroutine main_client() bind(c,name='plouhmans')
+
+    ! Multigrid parameters ...
+    print *, 'run ppm simulation ...'
+    ! init ppm ...
+    call init_client()
+
+!    call solve_poisson(topo%ID, stream_function, vorticity)
+    
+    call ppm_finalize(info)
+    print *, 'end ppm simulation'
+  end subroutine main_client
+
+  subroutine read_data()
+
+    ! Set precision
+!    mpi_prec = MPI_DOUBLE_PRECISION
+
+!!$    ! Read input parameters on proc 0
+!!$    if(rank == 0) call readparams()
+!!$    
+!!$        
+!!$    call MPI_BCast(runtag,256,MPI_CHARACTER,0,MPI_COMM_WORLD,info)
+!!$    call MPI_BCast(iruntag,1,MPI_INTEGER,0,MPI_COMM_WORLD,info)
+!!$    !----------------------------------------------------------------------------!
+!!$    ! MPI Broadcasts ...
+!!$    !----------------------------------------------------------------------------!
+!!$    call MPI_BCast(nx,dime,MPI_INTEGER,0,MPI_COMM_WORLD,info)
+!!$    call MPI_BCast(min_physg,dime,mpi_prec,0,MPI_COMM_WORLD,info)
+!!$    call MPI_BCast(max_physg,dime,mpi_prec,0,MPI_COMM_WORLD,info)
+!!$    call MPI_BCast(dt,1,mpi_prec,0,MPI_COMM_WORLD,info)
+!!$    call MPI_BCast(dt_max,1,mpi_prec,0,MPI_COMM_WORLD,info)
+!!$    call MPI_BCast(tend,1,mpi_prec,0,MPI_COMM_WORLD,info)
+!!$    call MPI_BCast(itend,1,mpi_prec,0,MPI_COMM_WORLD,info)
+!!$    call MPI_BCast(nu,1,mpi_prec,0,MPI_COMM_WORLD,info)
+!!$    call mpi_bcast(verbose,1,mpi_logical,0,mpi_comm_world,info)
+!!$    call mpi_bcast(maxlev,1,mpi_integer,0,mpi_comm_world,info)
+
+    
+
+  end subroutine read_data
+
+end module ppmExample
diff --git a/HySoP/src/Unstable/SetsIndicators.f90 b/HySoP/src/Unstable/SetsIndicators.f90
new file mode 100755
index 0000000000000000000000000000000000000000..6f38e3916b11c8defae84d5f203bcea196065af2
--- /dev/null
+++ b/HySoP/src/Unstable/SetsIndicators.f90
@@ -0,0 +1,516 @@
+!> Penalization stuff (init chi, penalize vorticity)
+!! Note: drag/lift are also computed with the penalization routines.
+module SetsIndicators
+
+  use client_data
+  use VectorCalculus
+  !use client_topology, only : nsublist
+  use mpi,only:MPI_DOUBLE_PRECISION,MPI_SUM,MPI_COMM_WORLD
+  use Domain
+  implicit none
+
+  private
+
+  public :: init_obstacles,compute_control_box,compute_test,nocaForces,laplacian,chi_sphere,chi_boundary,chi_box,&
+       getMemoryForIndicators
+
+  ! Indicators functions 
+  ! The top and bottom (z axis) boundaries
+  integer, dimension(:,:), pointer  :: chi_boundary=>NULL()
+  ! The sphere
+  integer, dimension(:,:), pointer  :: chi_sphere=>NULL()
+
+  ! Axis z is south-north, x is downstream-upstream and y is east-west
+  !> control volume function
+  integer, dimension(:,:), pointer :: chi_box => NULL() 
+  !> north boundary ind. func (ie zmax)
+  integer, dimension(:,:), pointer :: chi_north => NULL() 
+  !> south boundary ind. func (ie zmin)
+  integer, dimension(:,:), pointer :: chi_south => NULL()
+  !> east boundary ind. func (ie ymin)
+  integer, dimension(:,:), pointer :: chi_east => NULL()
+  !> west boundary ind. func (ie ymax)
+  integer, dimension(:,:), pointer :: chi_west => NULL()
+  !> upstream boundary ind. func (ie xmax)
+  integer, dimension(:,:), pointer :: chi_up => NULL() ! upstream
+  !> downstream boundary ind. func (ie xmin)
+  integer, dimension(:,:), pointer :: chi_down => NULL() ! downstream 
+  !> indices of the faces of the control volume
+  integer,parameter :: Down=1,Up=2,West=3,East=4,South=5,North=6
+  !> Normal to each face of the control volume
+  real(mk),dimension(dime,2*dime) :: normal
+  !> A buffer for force on control volume, used to save force value on previous time step
+  real(mk),dimension(dime)::bufferForce
+  !> normalisation factor used to compute the drag in Noca's way
+  real(mk) :: coef
+  real(mk),parameter :: uinf=1.0
+
+  ! temp to avoid ppm dependence
+  integer, parameter :: nsublist = 1
+
+
+contains
+
+  !> compute chi functions for penalization at the boundaries to enforce Dirichlet conditions on zmin and zmax
+  subroutine init_obstacles(resolution,step,lower,upper,center,radius,layer,coordMin)
+    !> Number of points in each dir 
+    integer, dimension(dime), intent(in)  :: resolution
+    !> Grid steps sizes
+    real(mk), dimension(dime), intent(in) :: step
+    !> Dimensions of the boundary layers
+    real(mk), dimension(dime), intent(in) :: upper,lower
+    !> position of the center of the sphere
+    real(mk),dimension(dime),intent(in):: center
+    !> Radius of the sphere
+    real(mk),intent(in) :: radius
+    !> Boundary layer thickness
+    real(mk), intent(in) :: layer
+    !> Coordinates of the lowest point of the local domain
+    real(mk),dimension(dime),intent(in) :: coordMin
+
+    integer, dimension(:,:), allocatable :: tmp_boundary,tmp_sphere
+    integer::istat,i,j,k
+    real(mk),dimension(dime) :: coords
+    real(mk) :: dist
+    integer :: sizeMaxChi,count_boundary,count_sphere
+    real(mk) :: layerMin,layerMax
+    sizeMaxChi = product(resolution)
+    allocate(tmp_boundary(dime,sizeMaxChi),stat=istat)
+    if(istat.ne.0) stop 'Chi-boundaries function allocation error.'
+    allocate(tmp_sphere(dime,sizeMaxChi),stat=istat)
+    if(istat.ne.0) stop 'Chi-sphere function allocation error.'
+
+    layerMin = lower(c_Z) + layer
+    layerMax = upper(c_Z) - layer
+    count_boundary=0
+    count_sphere =0
+    do k=1,resolution(c_Z)
+       coords(c_Z)=coordMin(c_Z) + (k-1)*step(c_Z)
+       do j=1,resolution(c_Y)
+          coords(c_Y)=coordMin(c_Y) + (j-1)*step(c_Y)
+          do i=1,resolution(c_X)
+             coords(c_X)=coordMin(c_X) + (i-1)*step(c_X)
+             if( (coords(c_Z)>layerMax).or.(coords(c_Z)<layerMin)) then
+                count_boundary=count_boundary+1
+                tmp_boundary(c_X,count_boundary)=i
+                tmp_boundary(c_Y,count_boundary)=j
+                tmp_boundary(c_Z,count_boundary)=k
+             end if
+             dist = dot_product(coords-center,coords-center) - radius**2
+             if(dist <=0.0) then ! We are on or in the sphere ...
+                count_sphere=count_sphere+1 
+                tmp_sphere(c_X,count_sphere)=i
+                tmp_sphere(c_Y,count_sphere)=j
+                tmp_sphere(c_Z,count_sphere)=k
+             end if
+          end do
+       end do
+    end do
+    allocate(chi_boundary(dime,count_boundary))
+    chi_boundary=tmp_boundary(:,1:count_boundary)
+    allocate(chi_sphere(dime,count_sphere))
+    chi_sphere=tmp_sphere(:,1:count_sphere)
+    
+    deallocate(tmp_boundary,tmp_sphere)
+    
+  end subroutine init_obstacles
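+
+  ! Usage sketch (hypothetical values): mark a sphere of radius 0.5 centred at
+  ! the origin, within boundary layers of thickness 0.1:
+  !     call init_obstacles(resolution, step, lower, upper, &
+  !          (/0.0_mk, 0.0_mk, 0.0_mk/), 0.5_mk, 0.1_mk, coordMin)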
+
+  !> Compute indicator functions for the control box (including a sphere ...)
+  !! This routine fills in all the chi functions of the control volume
+  !> \param boxMin coordinates of the lower point of the box
+  !> \param boxMax coordinates of the upper point of the box
+  !! We suppose (require) that boxMin and boxMax correspond to grid points ...
+  subroutine compute_control_box(resolution,step,boxMin,boxMax,center,radius,coordMin)
+    !> Number of points in each dir (1st index) for each sub (2nd index)
+    integer, dimension(dime),intent(in) :: resolution
+    !> Grid steps sizes
+    real(mk), dimension(dime), intent(in) :: step
+    !> lower point of the box
+    real(mk),dimension(dime),intent(in) :: boxMin
+    !> upper point of the box
+    real(mk),dimension(dime),intent(in) :: boxMax
+    !> position of the center of the sphere
+    real(mk),dimension(dime),intent(in):: center
+    !> radius of the sphere
+    real(mk),intent(in) :: radius
+    !> Lower point of the domain
+    real(mk),dimension(dime),intent(in) :: coordMin
+
+    logical,dimension(dime) :: isMinIn,isMaxIn
+    real(mk),dimension(dime) :: coordMax,coords
+
+    integer, dimension(2*dime) :: ind
+    integer, dimension(2*dime) :: nbPoints
+    ! Size (number of points in each dir) of the control volume
+    integer(kind=8), dimension(dime) :: boxDim
+    real(mk) :: coord,dist
+    integer :: count,i,j,k,direction,count_box
+    integer(kind=8) :: nbPointsBox
+    integer,dimension(:,:),allocatable :: tmp_box
+
+    !> radius of the sphere
+    real(mk) :: radiusBis
+
+    ! TODO: add an assert to check that the sphere radius is smaller than the box size ...
+    radiusBis=radius!-step(c_Z)
+    ! First compute normals to the box boundaries
+    normal(:,:)=0.0
+    normal(c_X,Up)=1.0
+    normal(c_X,Down)=-1.0
+    normal(c_Y,West)=-1.0
+    normal(c_Y,East)=1.0
+    normal(c_Z,North)=1.0
+    normal(c_Z,South)=-1.0
+    ! Coordinates of the upper point 
+    coordMax(:)=coordMin(:)+(resolution(:)-1)*step(:) 
+    ! First : check if box boundaries are in the current domain
+    isMaxIn=.False.
+    isMinIn=.False.
+    where(coordMin <= boxMin) isMinIn=.True.  ! Lower boundaries
+    where(coordMax >= boxMax) isMaxIn=.True.  ! Upper boundaries
+
+    !! Look for local indices corresponding to the box boundaries (i.e. x,y,z = boxMin() and boxMax())
+    ! index order: Down,Up,West,East,South,North
+    ind=0
+    where(isMinIn(:).and.isMaxIn(:)) 
+       ind(1:2*dime:2)=1
+       ind(2:2*dime:2)=1
+    end where
+    where((.not.isMinIn(:)).and.(isMaxIn(:))) 
+       ind(1:2*dime:2)=1
+       ind(2:2*dime:2)=1
+    end where
+    where((isMinIn(:)).and.(.not.isMaxIn(:)))
+       ind(1:2*dime:2)=1
+       ind(2:2*dime:2)=resolution(:)
+    end where
+
+    do direction=1,dime 
+       do k=1,resolution(direction)
+          coord=coordMin(direction)+(k-1)*step(direction)
+          if(isMinIn(direction)) then
+             if(coord<boxMin(direction)) ind(2*direction-1)=k+1
+          end if
+          if(isMaxIn(direction)) then
+             if(coord<boxMax(direction)) then
+                ind(2*direction)=k+1
+             else
+                exit
+             end if
+          end if
+       end do
+    end do
+    ! ind now contains the indices of the points that lie on the box boundary, in each direction, in the following order:
+    ! ind(Down Up West East South North)
+
+    ! Remove last point to integrate properly ...
+    ind(2:2*dime:2)=ind(2:2*dime:2)-1
+    
+    ! Count the number of points on each face and inside the domain
+    nbPoints=0
+    if(isMinIn(1)) nbPoints(Down)=(ind(East)-ind(West)+1)*(ind(North)-ind(South)+1)
+    if(isMaxIn(1))   nbPoints(Up)=(ind(East)-ind(West)+1)*(ind(North)-ind(South)+1)
+    if(isMinIn(3)) nbPoints(South)=(ind(East)-ind(West)+1)*(ind(Up)-ind(Down)+1)
+    if(isMaxIn(3)) nbPoints(North)=(ind(East)-ind(West)+1)*(ind(Up)-ind(Down)+1)
+    if(isMinIn(2))  nbPoints(East)=(ind(Up)-ind(Down)+1)*(ind(North)-ind(South)+1)
+    if(isMaxIn(2))  nbPoints(West)=(ind(Up)-ind(Down)+1)*(ind(North)-ind(South)+1)
+
+    boxDim(c_X)=ind(Up)-ind(Down)+1
+    boxDim(c_Y)=ind(East)-ind(West)+1
+    boxDim(c_Z)=ind(North)-ind(South)+1
+    nbPointsBox = boxDim(c_X)*boxDim(c_Y)*boxDim(c_Z)
+    
+    allocate(tmp_box(dime,nbPointsBox))
+    allocate(chi_up(dime,nbPoints(Up)),chi_down(dime,nbPoints(Down)),chi_east(dime,nbPoints(East)),chi_west(dime,nbPoints(West)))
+    allocate(chi_south(dime,nbPoints(South)),chi_north(dime,nbPoints(North)))
+    count_box=0
+
+    if(all(boxDim>0)) then
+       do k=ind(South),ind(North)
+          coords(c_Z) = coordMin(c_Z)+(k-1)*step(c_Z)
+          do j=ind(West),ind(East)
+             coords(c_Y) = coordMin(c_Y)+(j-1)*step(c_Y)
+             do i=ind(Down),ind(Up)
+                coords(c_X) = coordMin(c_X)+(i-1)*step(c_X)
+                dist = dot_product(coords-center,coords-center) - radiusBis**2
+                if(dist >=0.0) then ! We are on or outside the sphere ...
+                   count_box = count_box+1
+                   tmp_box(c_X,count_box)=i
+                   tmp_box(c_Y,count_box)=j
+                   tmp_box(c_Z,count_box)=k
+                end if
+             end do
+          end do
+       end do
+    end if
+    allocate(chi_box(dime,count_box))
+    chi_box=tmp_box(:,1:count_box)
+    
+    deallocate(tmp_box)
+    if(isMinIn(3)) then ! South boundary
+       count=1
+       chi_south(3,:)=ind(South)
+       do j=ind(West),ind(East)
+          do i=ind(Down),ind(Up)
+             chi_south(1,count)=i
+             chi_south(2,count)=j
+             count=count+1
+          end do
+       end do
+    end if
+    if(isMinIn(2)) then ! East boundary
+       count=1
+       chi_east(2,:)=ind(East)+1
+       do k=ind(South),ind(North)
+          do i=ind(Down),ind(Up)
+             chi_east(1,count)=i
+             chi_east(3,count)=k
+             count=count+1
+          end do
+       end do
+    end if
+    if(isMinIn(1)) then ! Downstream boundary is in the domain
+       count=1
+       chi_down(1,:)=ind(Down)
+       do k=ind(South),ind(North)
+          do j=ind(West),ind(East)
+             chi_down(2,count)=j
+             chi_down(3,count)=k
+             count=count+1
+          end do
+       end do
+    end if
+
+    if(isMaxIn(3)) then ! North boundary is in the domain
+       count=1
+       chi_north(3,:)=ind(North)+1
+       do j=ind(West),ind(East)
+          do i=ind(Down),ind(Up)
+             chi_north(1,count)=i
+             chi_north(2,count)=j
+             count=count+1
+          end do
+       end do
+    end if
+    if(isMaxIn(2)) then ! West boundary is in the domain
+       count=1
+       chi_west(2,:)=ind(West)
+       do k=ind(South),ind(North)
+          do i=ind(Down),ind(Up)
+             chi_west(1,count)=i
+             chi_west(3,count)=k
+             count=count+1
+          end do
+       end do
+    end if
+    if(isMaxIn(1)) then ! Upstream boundary is in the domain
+       count=1
+       chi_up(1,:)=ind(Up)+1
+       do k=ind(South),ind(North)
+          do j=ind(West),ind(East)
+             chi_up(2,count)=j
+             chi_up(3,count)=k
+             count=count+1
+          end do
+       end do
+    end if
+
+    bufferForce = 0.0    
+    ! Compute the coefficient used to calculate the drag in Noca's way
+    coef = 2./(uinf**2*pi*radius**2)
+
+  end subroutine compute_control_box
+  
+
+  !> Set the input field to one at the points of the given indicator set (the rest of the field is left unchanged)
+  subroutine compute_test(testfield,chi)
+    real(mk), dimension(:,:,:),pointer:: testfield
+    integer,dimension(:,:),pointer ::chi
+    integer :: k
+    do k=1,size(chi,2)
+       testfield(chi(1,k),chi(2,k),chi(3,k)) = 1.0
+    end do
+  end subroutine compute_test
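+
+  ! Usage sketch: mark the control-box points in a diagnostic field, assuming
+  ! "testfield" has been allocated and zeroed beforehand:
+  !     testfield = 0.0_mk
+  !     call compute_test(testfield, chi_box)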
+
+  !> Computation of the drag according to "method B" presented in
+  !! Noca99 or Plouhmans, 2002, Journal of Computational Physics
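+  !! In compact form, the quantity assembled below is (a sketch; d = dime and
+  !! \f$T = \nu(\nabla u + \nabla u^T)\f$ denotes the viscous stress):
+  !! \f$ F = \oint_S \big[\tfrac{1}{2}|u|^2 n - (n\cdot u)u
+  !!       - \tfrac{1}{d-1}(n\cdot u)(x\times\omega)
+  !!       + \tfrac{1}{d-1}(n\cdot\omega)(x\times u) + n\cdot T\big]\,dS
+  !!       - \tfrac{1}{d-1}\,\frac{d}{dt}\int_V x\times\omega\,dV \f$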
+  subroutine nocaForces(force,velo,vort,nu,coordMin,step,dt,dvol)
+
+    !> The force to be computed
+    real(mk), dimension(dime),intent(inout) :: force
+    !! velocity and vorticity fields, intent(in)
+    real(mk), dimension(:,:,:,:,:),pointer :: velo,vort
+    !> viscosity
+    real(mk),intent(in)::nu
+    !! Coordinates of the lowest point in the domain
+    real(mk),dimension(dime),intent(in):: coordMin
+    !! mesh step sizes
+    real(mk),dimension(dime),intent(in)::step
+    !> Time step
+    real(mk),intent(in)::dt
+    !> element. vol
+    real(mk),intent(in) ::dvol
+    ! Surface element
+    real(mk) :: dsurf
+    real(mk),dimension(dime)::localForce
+    integer :: info
+    localForce=0.0
+    force=0.0
+
+    ! Downstream and upstream surface
+    dsurf=step(c_Y)*step(c_Z)
+    call integrateOnSurface(localForce,velo,vort,chi_down,normal(:,Down),c_X,nu,dsurf,coordMin,step)
+    call integrateOnSurface(localForce,velo,vort,chi_up,normal(:,Up),c_X,nu,dsurf,coordMin,step)
+    ! East and West 
+    dsurf=step(c_X)*step(c_Z)
+    call integrateOnSurface(localForce,velo,vort,chi_east,normal(:,East),c_Y,nu,dsurf,coordMin,step)
+    call integrateOnSurface(localForce,velo,vort,chi_west,normal(:,West),c_Y,nu,dsurf,coordMin,step)
+    ! North and south
+    dsurf=step(c_Y)*step(c_X)
+    call integrateOnSurface(localForce,velo,vort,chi_south,normal(:,South),c_Z,nu,dsurf,coordMin,step)
+    call integrateOnSurface(localForce,velo,vort,chi_north,normal(:,North),c_Z,nu,dsurf,coordMin,step)
+    ! over the volume ...
+    call integrateOnBox(localForce,vort,chi_box,dvol,dt,coordMin,step)
+
+    localForce=localForce*coef
+    !write(*,'(a,3f10.5)') ' drag local: ', localForce
+    call MPI_Reduce(localForce,force,dime,MPI_DOUBLE_PRECISION,MPI_SUM,0,MPI_COMM_WORLD,info)
+
+  end subroutine nocaForces
+
+  !> Add to force the term -1/(dime-1)*d/dt int_over_control_box (coord X vorticity);
+  !! the time derivative is approximated by a first-order finite difference
+  !! using the value saved at the previous call (bufferForce)
+  subroutine integrateOnBox(force,vort,chi,dvol,dt,coordMin,step)
+    !! The force to be computed
+    real(mk), dimension(dime),intent(inout) :: force
+    !! vorticity fields, intent(in)
+    real(mk), dimension(:,:,:,:,:),pointer :: vort
+    !! Indicator function of the box
+    integer, dimension(:,:), pointer :: chi
+    !! Element of volume
+    real(mk),intent(in)::dvol
+    !> Time step
+    real(mk),intent(in)::dt
+    !! Coordinates of the lowest point in the domain
+    real(mk),dimension(dime),intent(in):: coordMin
+    !! mesh step sizes
+    real(mk),dimension(dime),intent(in)::step
+
+    ! coordinates of the current point
+    real(mk),dimension(dime) :: coords,int1
+    ! local indices
+    integer :: i,j,k,ind
+    real(mk)::fact
+    fact = -dvol/((dime-1)*dt)
+    int1=0.0
+    !! For all points in the box ...
+    do ind=1,size(chi,2)
+       i=chi(1,ind)
+       j=chi(2,ind)
+       k=chi(3,ind)
+       ! coordinates of the current point
+       coords = coordMin + (chi(:,ind)-1)*step
+       !! part1 of the force
+       int1=int1+cross_prod(coords,vort(:,i,j,k,nsublist))
+    end do
+    force = force + fact*(int1-bufferForce)
+    bufferForce = int1 ! Save for next time step ...
+  end subroutine integrateOnBox
+
+  !> Compute integrals on surface to calculate forces acting on the body.
+  !! See (2.1) of Noca 1999 or (52) of Plouhmans 2002 
+  !! Integrals on the sphere are neglected. 
+  subroutine integrateOnSurface(force,velo,vort,chi,NormalVec,direction,nu,dsurf,coordMin,step)
+    !! The force to be computed
+    real(mk), dimension(dime),intent(inout) :: force
+    !! velocity and vorticity fields, intent(in)
+    real(mk), dimension(:,:,:,:,:),pointer :: velo,vort
+    !! Indicator function of the considered face, intent(in)
+    integer, dimension(:,:), pointer :: chi
+    !! Normal to the considered face (dir : to the outside of the volume)
+    real(mk), dimension(dime),intent(in) :: NormalVec
+    !! index of the non-null coordinate in normal, must be c_X,c_Y or c_Z
+    integer,intent(in) :: direction
+    !! viscosity
+    real(mk),intent(in)::nu
+    !! Element of surface
+    real(mk),intent(in)::dsurf
+    !! Coordinates of the lowest point in the domain
+    real(mk),dimension(dime),intent(in):: coordMin
+    !! mesh step sizes
+    real(mk),dimension(dime),intent(in)::step
+
+    !! Some local values ...
+    real(mk) :: u_u,n_u,n_w,fact
+    integer :: i,j,k,ind
+    !! local coordinates
+    real(mk), dimension(dime) :: coords
+    real(mk), dimension(dime) :: int1,int2,nDivT,diff_dir,buffer
+    
+    fact = 1./(dime-1)
+    ! For each point of the current plane ...
+    int1=0.0
+    int2=0.0
+    diff_dir=0.0
+    do ind=1,size(chi,2)
+       i=chi(1,ind)
+       j=chi(2,ind)
+       k=chi(3,ind)
+       !! part1 = 1/2(velocity.velocity)n - (n.velocity)velocity - 1/(dime-1)(n.velocity)(coord X vorticity) + 1/(dime-1)(n.vorticity)(coord X velocity)
+       ! 0.5*velocity.velocity
+       u_u=dot_product(velo(:,i,j,k,nsublist),velo(:,i,j,k,nsublist))
+       ! normal.velocity
+       n_u=dot_product(velo(:,i,j,k,nsublist),NormalVec(:))
+       ! normal.vorticity
+       n_w=dot_product(vort(:,i,j,k,nsublist),NormalVec(:))
+       ! coordinates of the current point
+       coords = coordMin + (chi(:,ind)-1)*step
+       !! part1 of the force
+       int1=int1+0.5*u_u*NormalVec(:)-n_u*velo(:,i,j,k,nsublist)&
+            - fact*n_u*cross_prod(coords,vort(:,i,j,k,nsublist))&
+            + fact*n_w*cross_prod(coords,velo(:,i,j,k,nsublist))
+
+       !! part 2 of the force, the one concerning T = nu(nabla u + nabla uT)
+       !! Considering that the box is a parallelepiped, each normal is equal to something like (1,0,0)
+       !! and the integral simplifies in :
+       !! n.T = nabla velocity_dir + d/ddir velocity, dir being the dir of the normal,
+       !! n X nabla.T = function(laplacian of the velocity components)
+       nDivT = 0.0 ! n X nabla.T 
+
+       if(direction==c_X) then ! face = Down or Up, d/dx
+          diff_dir = diffX(velo,i,j,k,step(direction),nsublist)
+          nDivT(2) = -laplacian(velo,c_Z,i,j,k,step,nsublist) ! Laplacian of velocity_y
+          nDivT(3) = laplacian(velo,c_Y,i,j,k,step,nsublist) ! Laplacian of velocity_z
+       else if(direction==c_Y) then ! face = East or West, d/dy
+          diff_dir = diffY(velo,i,j,k,step(direction),nsublist) 
+          nDivT(3)=-laplacian(velo,c_X,i,j,k,step,nsublist) ! Laplacian of velocity_x
+          nDivT(1)=laplacian(velo,c_Z,i,j,k,step,nsublist) ! Laplacian of velocity_z
+       else if(direction==c_Z) then ! face = North or South, d/dz
+          diff_dir = diffZ(velo,i,j,k,step(direction),nsublist)
+          nDivT(2)=laplacian(velo,c_X,i,j,k,step,nsublist) ! Laplacian of velocity_x
+          nDivT(1)=-laplacian(velo,c_Y,i,j,k,step,nsublist) ! Laplacian of velocity_y
+       end if
+       buffer=nabla(velo,direction,i,j,k,step,nsublist) + diff_dir + fact*cross_prod(coords,nDivT)
+       buffer=NormalVec(direction)*nu*buffer
+       int2=int2+buffer
+
+    end do
+
+    ! Product with element of surface and sum to the total (input) force
+    force = force+(int1+int2)*dsurf
+  end subroutine integrateOnSurface
+
+  !> get memory allocated for indicator sets, in MB (relies on the non-standard sizeof extension)
+  function getMemoryForIndicators()
+    real(mk) :: getMemoryForIndicators
+    
+    getMemoryForIndicators = sizeof(chi_boundary)+sizeof(chi_sphere)+sizeof(chi_box)+sizeof(chi_north)&
+         + sizeof(chi_west)+sizeof(chi_east)+sizeof(chi_south)+sizeof(chi_up)+sizeof(chi_down)
+    getMemoryForIndicators = getMemoryForIndicators*1e-6
+    if(verbose) then
+       write(*,'(a,i3,a,f10.4,a)') &
+            '[',rank,'] memory used for indicator sets:', getMemoryForIndicators, ' MB.'
+    end if
+    
+  end function getMemoryForIndicators
+
+end module SetsIndicators
diff --git a/HySoP/src/Unstable/TestFunctions.f90 b/HySoP/src/Unstable/TestFunctions.f90
new file mode 100755
index 0000000000000000000000000000000000000000..2013ff5277d0aa45c8a8a6e94a7f3c7f69bd6554
--- /dev/null
+++ b/HySoP/src/Unstable/TestFunctions.f90
@@ -0,0 +1,380 @@
+!> Functions used to compute fields values on the grid, for specific pre-defined cases. 
+module testsFunctions
+
+  use client_data
+  implicit none
+
+  real(mk) :: xref
+  integer :: np
+contains
+  
+  !> Computes the analytical values for stream function, velocity and vorticity such that
+  !>   \f{eqnarray*}{ rhs_{ex} &=& -\omega_{ex} = \Delta\psi_{ex} \\    vel_{ex} &=& \nabla \times \psi_{ex} \f}
+  !>   \f{eqnarray*}{\nabla.\psi_{ex} &=& \nabla.\omega_{ex} = \nabla.vel_{ex} = 0.0  \\\nabla\times vel_{ex} &=& \omega_{ex}  \f}
+  !>  (see maple file)
+  subroutine poisson_analytic(resolution,step,coordMin,rhs_ex,vel_ex,psi_ex)
+
+    !> the local resolution
+    integer, dimension(dime),intent(in) :: resolution
+    !> size of mesh step in each dir
+    real(mk), dimension(dime),intent(in) :: step
+    !> Coordinates of the local minimum point
+    real(mk),dimension(dime),intent(in) :: coordMin
+    !> rhs function values on the grid (i.e. -omega_ex)
+    real(mk), dimension(:,:,:,:), pointer :: rhs_ex
+    !> velocity function values on the grid
+    real(mk), dimension(:,:,:,:), pointer :: vel_ex
+    !> stream function values on the grid
+    real(mk), dimension(:,:,:,:), pointer :: psi_ex
+    
+    
+    real(mk) :: x,y,z,cx,cy,cz,c2x,c2y,c2z,sx,sy,sz,s2x,s2y,s2z
+    integer :: i,j,k
+    real(mk) :: pi
+    
+    pi = 4.0*atan(1.0_mk)
+       
+    do k=1,resolution(c_Z)
+       z = coordMin(c_Z) + (k-1)*step(c_Z)
+       cz=cos(pi*z)
+       c2z=cos(2.*pi*z)
+       sz=sin(pi*z)
+       s2z=sin(2.*pi*z)
+       do j=1,resolution(c_Y)
+          y = coordMin(c_Y) + (j-1)*step(c_Y)
+          cy=cos(pi*y)
+          c2y=cos(2.*pi*y)
+          sy=sin(pi*y)
+          s2y=sin(2.*pi*y)
+          do i=1,resolution(c_X)
+             x = coordMin(c_X) + (i-1)*step(c_X)
+             cx=cos(pi*x)
+             c2x=cos(2.*pi*x)
+             sx=sin(pi*x)
+             s2x=sin(2.*pi*x)
+             
+             rhs_ex(1,i,j,k) = 8.*s2y*pi**2*s2z
+             rhs_ex(2,i,j,k) = 8.*s2x*pi**2*s2z
+             rhs_ex(3,i,j,k)=  8.*s2x*pi**2*s2y
+             
+             vel_ex(1,i,j,k) = 2.*s2x*pi*(c2y - c2z)
+             vel_ex(2,i,j,k) = 2.*s2y*pi*(c2z - c2x)
+             vel_ex(3,i,j,k) = 2.*s2z*pi*(c2x - c2y)
+             
+             psi_ex(1,i,j,k) = sy*sz
+             psi_ex(2,i,j,k) = sx*sz
+             psi_ex(3,i,j,k) = sx*sy
+             
+          end do
+       end do
+    end do
+  end subroutine poisson_analytic
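+
+  !> Illustrative helper sketch (an assumption, not in the original file):
+  !> max-norm of the difference between a computed field and its analytic
+  !> counterpart, e.g. to check a Poisson solve against poisson_analytic.
+  !> Only the physical resolution is scanned, so ghost layers are ignored.
+  function maxError(field,field_ex,resolution)
+    real(mk), dimension(:,:,:,:), pointer :: field, field_ex
+    integer, dimension(dime), intent(in) :: resolution
+    real(mk) :: maxError
+
+    maxError = maxval(abs(field(:,1:resolution(c_X),1:resolution(c_Y),1:resolution(c_Z)) &
+         - field_ex(:,1:resolution(c_X),1:resolution(c_Y),1:resolution(c_Z))))
+  end function maxError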
+  
+  !> Init vorticity field on a grid. The documented (linear) profile is
+  !> \f$  \omega(x,y,z) =  \left[\begin{array}{c} 0 \\ -3z/(Lz^2) \\ 0 \end{array}\right]\f$
+  !> but the current test implementation sets \f$\omega_y = \cos(2\pi z)\f$
+  !> (the linear profile is kept in a comment below).
+  subroutine init_vorticity(vorticity,resolution,step,coordMin,lower,upper)
+
+    !> vorticity field
+    real(mk), dimension(:,:,:,:), pointer :: vorticity 
+    !> the local mesh resolution
+    integer,dimension(dime),intent(in) :: resolution
+    !> size of mesh step in each dir
+    real(mk), dimension(dime),intent(in) :: step
+    !> Coordinates of the minimal point of the local domain
+    real(mk),dimension(dime),intent(in) :: coordMin
+    !> boundaries of the domain
+    real(mk),dimension(dime),intent(in):: upper,lower
+    integer :: i,j,k
+    real(mk) :: x,z,physicalDomainSize_Z
+    real(mk) :: coef
+
+    physicalDomainSize_Z = (upper(c_Z)- lower(c_Z))/2.
+    vorticity = 0.0
+    coef = -3./(physicalDomainSize_Z)**2
+    
+    do k=1,resolution(c_Z)
+       z = coordMin(c_Z) + (k-1)*step(c_Z)
+       do j=1,resolution(c_Y)
+          !y = coordMin(c_Y) + (j-1)*step(c_Y)
+          do i=1,resolution(c_X)
+             x = coordMin(c_X) + (i-1)*step(c_X)
+             !if( (z < upper(c_Z)).and.(z>lower(c_Z))) then
+                vorticity(c_Y,i,j,k) = cos(2.*pi*z) ! linear alternative: coef*z
+             !endif
+          end do
+       end do
+    end do
+    
+    
+  end subroutine init_vorticity
+  
+  !> Computes the analytical values for stream function, velocity and vorticity such that
+  !>   \f{eqnarray*}{ rhs_{ex} &=& -\omega_{ex} = \Delta\psi_{ex} \\    vel_{ex} &=& \nabla \times \psi_{ex} \f}
+  !>   \f{eqnarray*}{\nabla.\psi_{ex} &=& \nabla.\omega_{ex} = \nabla.vel_{ex} = 0.0  \\\nabla\times vel_{ex} &=& \omega_{ex}  \f}
+  !>  (see maple file)
+  subroutine rhs_analytic(resolution,step,coordMin,rhs_ex,velocity,vorticity,nu)
+    !> the local mesh resolution
+    integer,dimension(dime),intent(in) :: resolution
+    !> size of mesh step in each dir
+    real(mk), dimension(dime),intent(in) :: step
+    !> Coordinates of the minimal point of the local domain
+    real(mk),dimension(dime),intent(in) :: coordMin
+    !> rhs function values on the grid (i.e. -omega_ex), velocity and vorticity
+    real(mk), dimension(:,:,:,:), pointer :: rhs_ex,velocity,vorticity
+    !> viscosity
+    real(mk),intent(in) :: nu
+
+    real(mk) :: x,y,z,cx,cy,cz,c2x,c2y,c2z,sx,sy,sz,s2x,s2y,s2z
+    integer :: i,j,k
+    
+    do k=1,resolution(c_Z)
+       z = coordMin(c_Z) + (k-1)*step(c_Z)
+       cz=cos(pi*z)
+       c2z=cos(2.*pi*z)
+       sz=sin(pi*z)
+       s2z=sin(2.*pi*z)
+       do j=1,resolution(c_Y)
+          y = coordMin(c_Y) + (j-1)*step(c_Y)
+          cy=cos(pi*y)
+          c2y=cos(2.*pi*y)
+          sy=sin(pi*y)
+          s2y=sin(2.*pi*y)
+          do i=1,resolution(c_X)
+             x = coordMin(c_X) + (i-1)*step(c_X)
+             cx=cos(pi*x)
+             c2x=cos(2.*pi*x)
+             sx=sin(pi*x)
+             s2x=sin(2.*pi*x)
+             
+             rhs_ex(1,i,j,k) = 32.*s2y*pi**4*s2z*(-2.*nu+c2x*c2y-c2x*c2z)
+             rhs_ex(2,i,j,k) = -32.*s2x*pi**4*s2z*(2.*nu-c2y*c2z+c2x*c2y)
+             rhs_ex(3,i,j,k) = 32.*s2x*pi**4*s2y*(-2.*nu+c2x*c2z-c2z*c2y)
+
+             vorticity(1,i,j,k) = 8.*s2y*pi**2*s2z
+             vorticity(2,i,j,k) = 8.*s2x*pi**2*s2z
+             vorticity(3,i,j,k)=  8.*s2x*pi**2*s2y
+             
+             velocity(1,i,j,k) = 2.*s2x*pi*(c2y - c2z)
+             velocity(2,i,j,k) = 2.*s2y*pi*(c2z - c2x)
+             velocity(3,i,j,k) = 2.*s2z*pi*(c2x - c2y)
+             
+             
+          end do
+       end do
+    end do
+  end subroutine rhs_analytic
+  !> Test purpose function,
+  !> computes \f$  \omega(x,y,z) =  \left[\begin{array}{c} 0 \\ 0 \\ 0.1 \end{array}\right]\f$
+  !> and  \f$  velocity(x,y,z) =  \left[\begin{array}{c} 0 \\ 0 \\ 2.0 \end{array}\right]\f$
+  subroutine test_particles(vorticity,velocity,resolution,step,coordMin)
+    
+    !> vorticity field
+    real(mk), dimension(:,:,:,:), pointer :: vorticity ,velocity
+    !> the local resolution
+    integer, dimension(dime),intent(in) :: resolution
+    !> size of mesh step in each dir
+    real(mk), dimension(dime),intent(in) :: step
+    !> Coordinates of the local minimum point
+    real(mk),dimension(dime),intent(in) :: coordMin
+
+    integer :: i,j,k,l
+    real(mk) :: pi,x,y,z
+    
+    pi = 4.0*atan(1.0_mk)
+    vorticity = 0.
+    velocity = 0.
+    do k=1,resolution(c_Z)
+       z = coordMin(c_Z) + (k-1)*step(c_Z)
+       do j=1,resolution(c_Y)
+          y = coordMin(c_Y) + (j-1)*step(c_Y)
+          do i=1,resolution(c_X)
+             x = coordMin(c_X) + (i-1)*step(c_X)
+             ! Warning: exact floating-point equality tests; this only selects
+             ! grid points built from exactly these step values.
+             if( (x .eq.10*step(c_X)).and.(y.eq.0).and.(z.eq.xref)) then
+                do l=1,np
+                   vorticity(3,i,j,k+l-1) =  0.1;
+                   velocity(3,i,j,k+l-1) = 2.0
+                enddo
+             endif
+          end do
+       end do
+    end do
+    xref = xref + step(c_Z)
+    !np = np +1
+  end subroutine test_particles
+  
+  !> Test purpose function,
+  !> computes \f$  \omega(x,y,z) =  \left[\begin{array}{c} cos(x) \\ cos(x) \\ cos(x) \end{array}\right]\f$
+  subroutine test_vorticity(vorticity,resolution,step,coordMin,lower,upper)
+    ! vorticity field
+    real(mk), dimension(:,:,:,:), pointer :: vorticity 
+    !> the local resolution
+    integer, dimension(dime),intent(in) :: resolution
+    !> size of mesh step in each dir
+    real(mk), dimension(dime),intent(in) :: step
+    !> Coordinates of the local minimum point
+    real(mk),dimension(dime),intent(in) :: coordMin
+    !> Boundaries coordinates
+    real(mk),dimension(dime),intent(in):: upper, lower
+
+    integer :: i,j,k
+    real(mk) :: pi,x,y,z,physicalDomainSize_Z
+    real(mk) :: coef
+
+    physicalDomainSize_Z = (upper(c_Z)- lower(c_Z))/2.
+    pi = 4.0*atan(1.0_mk)
+    vorticity = 0.0
+    coef = -3./(physicalDomainSize_Z)**2
+    do k=1,resolution(c_Z)
+       z = coordMin(c_Z) + (k-1)*step(c_Z)
+       do j=1,resolution(c_Y)
+          y = coordMin(c_Y) + (j-1)*step(c_Y)
+          do i=1,resolution(c_X)
+             x = coordMin(c_X) + (i-1)*step(c_X)
+             vorticity(:,i,j,k) = cos(x)
+          end do
+       end do
+    end do
+  end subroutine test_vorticity
+
+
+  !> Compute flow through x = xmin surface, for the following stream function :
+  !> \f$  \psi(x,y,z) =  \left[\begin{array}{c} 0 \\-U_{inf}z(1 - \frac{R^2}{z^2+x^2})  \\ 0 \end{array}\right] \f$
+  !> i.e.
+  !> \f[ flowRate_{theo} = \left[ -U_{inf}length_y(z - \frac{R^2z}{z^2+x^2})\right]_{lower_z}^{upper_z}\f]
+  function requiredFlowRate3D(radius,length,lower,upper,uinf)
+
+    !> sphere radius
+    real(mk),intent(in) :: radius
+    !> domain dimensions
+    real(mk),dimension(dime),intent(in) :: length
+    !> physical domain lower point
+    real(mk),dimension(dime),intent(in)::lower
+    !> physical domain upper point
+    real(mk),dimension(dime),intent(in)::upper
+    !> velocity inf
+    real(mk),intent(in) :: uinf
+    !> required flow rate
+    real(mk) :: requiredFlowRate3D
+    
+    real(mk) :: dom
+    ! position of the surface for flow rate computation
+    real(mk)::xPos
+
+    xPos = lower(c_X)
+    dom = upper(c_Z)**2+xPos**2
+    if(abs(dom) < epsilon(dom)) then ! if dom == 0
+       requiredFlowRate3D = upper(c_Z)
+    else
+       requiredFlowRate3D = upper(c_Z)*(1.-radius**2/dom)
+    end if
+    dom = lower(c_Z)**2+xPos**2
+    if(abs(dom) < epsilon(dom)) then
+       requiredFlowRate3D = requiredFlowRate3D - lower(c_Z)
+    else 
+       requiredFlowRate3D = requiredFlowRate3D - lower(c_Z)*(1. - radius**2/dom)
+    end if
+    requiredFlowRate3D = requiredFlowRate3D * uinf *length(c_Y)
+    return
+  end function requiredFlowRate3D
+
+  !> Compute flow through x = xmin surface, for the following stream function :
+  !> \f$  \psi(x,y,z) =  \left[\begin{array}{c} 0 \\-U_{inf}z(1 - \frac{R^2}{z^2+x^2})  \\ 0 \end{array}\right] \f$
+  !> i.e.
+  !> \f[ flowRate_{theo} = \left[ -U_{inf}length_y(z - \frac{R^2z}{z^2+x^2})\right]_{lower_z}^{upper_z}\f]
+  function requiredFlowRate2D(radius,lower,upper,uinf)
+
+    !> sphere radius
+    real(mk),intent(in) :: radius
+    !> physical domain lower point
+    real(mk),dimension(dime),intent(in)::lower
+    !> physical domain upper point
+    real(mk),dimension(dime),intent(in)::upper
+    !> velocity inf
+    real(mk),intent(in) :: uinf
+    !> required flow rate
+    real(mk) :: requiredFlowRate2D
+    
+    real(mk) :: dom
+    ! position of the surface for flow rate computation
+    real(mk)::xPos
+
+    xPos = lower(c_X)
+    dom = upper(c_Y)**2+xPos**2
+    if(abs(dom) < epsilon(dom)) then ! if dom == 0
+       requiredFlowRate2D = upper(c_Y)
+    else
+       requiredFlowRate2D = upper(c_Y)*(1.-radius**2/dom)
+    end if
+    dom = lower(c_Y)**2+xPos**2
+    if(abs(dom) < epsilon(dom)) then
+       requiredFlowRate2D = requiredFlowRate2D - lower(c_Y)
+    else
+       requiredFlowRate2D = requiredFlowRate2D - lower(c_Y)*(1. - radius**2/dom)
+    end if
+    requiredFlowRate2D = requiredFlowRate2D * uinf
+    return
+  end function requiredFlowRate2D
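+
+  !> Sanity check for the two flow-rate functions above: with radius = 0 they
+  !> reduce to the uniform-flow flux, e.g. in 3D
+  !> \f$ flowRate_{theo} = U_{inf}\,length_y\,(upper_z - lower_z)\f$.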
+
+
+  subroutine Gaussian2D(field,resolution,step,coordMin,center)
+    
+    !> Field initialized with a Gaussian
+    real(mk), dimension(:,:,:), pointer :: field
+    !> Space step
+    real(mk), dimension(dime) :: step
+    !> local resolution
+    integer, dimension(dime) :: resolution
+    !> Coordinates of the lowest point in the current subdomain
+    real(mk),dimension(dime),intent(in) :: coordMin
+    !> Center of the Gaussian
+    real(mk), dimension(dime),intent(in) :: center
+           
+    real(mk), parameter :: sigma = 0.2
+    real(mk) :: expo
+    real(mk),dimension(dime) :: coord
+    integer :: i,j
+
+    field = 0.0
+    do j = 1,resolution(c_Y)
+       coord(c_Y) = coordMin(c_Y)+(j-1)*step(c_Y)-center(c_Y)
+       do i = 1, resolution(c_X)
+          coord(c_X) = coordMin(c_X) + (i-1)*step(c_X)-center(c_X)
+          expo=dot_product(coord,coord)*0.5/sigma**2
+          field(i,j,:) = exp(-expo)
+       end do
+    end do
+
+  end subroutine Gaussian2D
+
+  subroutine Gaussian1D(field,resolution,step,coordMin,dir,shift)
+    
+    !> Field initialized with a Gaussian
+    real(mk), dimension(:,:,:), pointer :: field
+    !> Space step
+    real(mk), dimension(dime) :: step
+    !> local resolution
+    integer, dimension(dime) :: resolution
+    !> Coordinates of the lowest point in the current subdomain
+    real(mk),dimension(dime),intent(in) :: coordMin
+    !> Shift of the Gaussian center along direction dir
+    real(mk), intent(in) :: shift
+    !> Advection direction
+    integer, intent(in) :: dir
+
+    real(mk), parameter :: sigma = 0.2
+    real(mk) :: coord,coeff
+    integer :: i
+
+    !coord(c_Y) = coordMin(c_Y) + (j-1)*step(c_Y)
+    coeff = 1./(sigma*sqrt(2.*pi)) ! 1D Gaussian normalization factor
+    print *, "shift", shift
+    field = 0.
+    do i = 1, resolution(dir)
+       coord = coordMin(dir) + (i-1)*step(dir)-shift
+       field(i,:,1) = coeff*exp(-0.5*(coord/sigma)**2.)
+    end do
+
+  end subroutine Gaussian1D
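+
+  !> Illustrative usage sketch (an assumption; it mirrors the commented calls
+  !> in NavierStokes2D.f90): the exact solution of a constant-velocity
+  !> advection test is obtained by shifting the Gaussian center,
+  !>    call Gaussian1D(gauss, localResolution, grid_step, coordMin, c_X, uinf*time)
+  !> and the peak value of the field is then approximately
+  !> coeff = 1/(sigma*sqrt(2*pi)).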
+  
+
+end module testsFunctions
diff --git a/HySoP/src/Unstable/callPPM.f90 b/HySoP/src/Unstable/callPPM.f90
new file mode 100644
index 0000000000000000000000000000000000000000..d934225342b1f5442b94bc361e756c138a18ae01
--- /dev/null
+++ b/HySoP/src/Unstable/callPPM.f90
@@ -0,0 +1,98 @@
+!> temp module to call and test ppm functions.
+module testPPM
+
+  !-------------------------------------------------------------------------
+  !  Modules 
+  !-------------------------------------------------------------------------
+!!$  USE ppm_module_map
+!!$  USE ppm_module_topo
+!!$  USE ppm_module_loadbal
+!!$  USE ppm_module_neighlist
+!!$  USE ppm_module_ode
+!!$  USE ppm_module_user_util
+!!$  USE pse_global
+!!$  USE pse_module_io
+!!$  USE pse_module_comp
+
+  use ppm_module_init
+  use ppm_module_substart
+  use ppm_module_substop
+  use ppm_module_finalize
+  use ppm_module_data
+  use mpi
+
+  implicit none
+
+contains
+
+  !> Simple test routine called from C++ (see ppm_wrapper.hpp): scales A in place.
+  !! Note: the C prototype passes double*, so explicit-shape double precision
+  !! arguments are used here; the original assumed-shape single-precision
+  !! declaration could not be called safely from C.
+  subroutine mult(A,x,sizeA)
+    integer :: sizeA,j
+    real(kind=8), dimension(sizeA) :: A
+    real(kind=8) :: x
+
+    do j=1,sizeA
+       A(j) = x*A(j)
+    end do
+  end subroutine mult
+
+  subroutine init()
+
+    real(8) :: t0
+    integer :: info,size
+    integer :: ndim, tolexp, comm, debug, ppm_log_unit
+    integer, parameter :: MK = KIND(1.0D0)
+
+    ndim = 3
+    tolexp = -15
+    comm = MPI_COMM_WORLD
+    debug = 2
+    ppm_log_unit  = 99
+
+    call MPI_Init(info)
+    if(info.ne.0) then 
+       write(*,*) 'FAILED TO INITIALIZE MPI. ABORTING!'
+    end if
+    call MPI_COMM_SIZE(comm,size,info)
+
+    ! PPM init.
+    !! This will:
+    !! - init the stdout, stderr, stdlog outputs
+    !! - check if MPI has been properly initialized
+    !! - fill comm, ppm_rank, ppm_nb_procs ... and MPI/ppm related values
+    !! - check and set dimension of the problem, precision and tolerance
+    !! - set MPI_PREC --> ppm_mpi_kind
+    !! - init proc speed array (ppm_proc_speed)
+    !! - reset topology counter
+    call ppm_init(ndim,MK,tolexp,comm,debug,info,ppm_log_unit,98,97)
+
+    print *, "debug ", debug, " module debug", ppm_debug
+
+    !! Display ...
+    call substart('testPPM',t0,info)
+
+    !! Warning : display elapsed time since t0. 
+    call substop ('testPPM', t0, info)
+
+    !! Finalize
+    !! - deallocate lots of arrays ...
+    !! - call ppm_mesh_finalize --> deallocate mesh structures
+    call ppm_finalize(info)
+
+    call MPI_FINALIZE(info)
+
+  end subroutine init
+end module testPPM
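+
+!> Illustrative driver sketch (an assumption, not part of the original file):
+!> the call sequence intended for the module above.
+!!$program testPPMDriver
+!!$  use testPPM
+!!$  call init() ! MPI_Init + ppm_init + substart/substop + ppm_finalize + MPI_Finalize
+!!$end program testPPMDriver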
+
diff --git a/HySoP/src/interfaces/Fortran2Cpp/WrapC.hpp b/HySoP/src/interfaces/Fortran2Cpp/WrapC.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..919a18e57f3472bb172a7c885afb5e4aeb17c317
--- /dev/null
+++ b/HySoP/src/interfaces/Fortran2Cpp/WrapC.hpp
@@ -0,0 +1,10 @@
+#ifndef CWRAPPER_HPP
+#define CWRAPPER_HPP 
+
+typedef struct {
+  int length;
+  double* elements;
+} C2FPtr;
+
+
+#endif
diff --git a/HySoP/src/interfaces/Fortran2Cpp/WrapFortran.f90 b/HySoP/src/interfaces/Fortran2Cpp/WrapFortran.f90
new file mode 100644
index 0000000000000000000000000000000000000000..7748dd0c404c007f1d8f956006915e9fff560b1b
--- /dev/null
+++ b/HySoP/src/interfaces/Fortran2Cpp/WrapFortran.f90
@@ -0,0 +1,35 @@
+module WrapFort
+
+  use  iso_c_binding
+
+  implicit none
+
+  public aliasF2C
+  
+  real(kind=8) :: ref
+  integer, parameter :: real_kind = kind(ref)
+  
+  !> A structure to bind C and Fortran pointers
+  type, bind(C), public :: C2FPtr
+     integer (c_int) :: length
+     type (c_ptr) :: elements
+  end type C2FPtr
+
+  logical, parameter :: NDEBUG = .TRUE.
+
+contains
+
+  !> Make a C pointer alias a Fortran 1D array: return its address and length.
+  subroutine aliasF2C(vectorC, vectorF, length)
+    !> C pointer, set to the address of vectorF(1)
+    type(c_ptr),intent(out) :: vectorC
+    !> length of vectorF
+    integer(c_int), intent(out) :: length
+    !> Fortran array to expose; must be associated
+    real(kind=real_kind), pointer, dimension(:) :: vectorF
+
+    if(.not.associated(vectorF) ) then
+       stop 'Error, input Fortran vector is not associated'
+    end if
+    length = size(vectorF)
+    vectorC = c_loc(vectorF(1))
+    
+  end subroutine aliasF2C
+  
+end module WrapFort
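+
+! Illustrative usage sketch (an assumption, not in the original sources):
+! the C side receives the address through the C2FPtr struct of WrapC.hpp.
+!
+!   real(kind=real_kind), pointer, dimension(:) :: v
+!   type(c_ptr) :: cv
+!   integer(c_int) :: n
+!   allocate(v(10)); v = 1.0
+!   call aliasF2C(cv, v, n)   ! cv now holds c_loc(v(1)), n == 10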
diff --git a/HySoP/src/interfaces/ppm/ppm_wrapper.hpp b/HySoP/src/interfaces/ppm/ppm_wrapper.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..85181449f73f2baea3acbceb8356d3beba5a1728
--- /dev/null
+++ b/HySoP/src/interfaces/ppm/ppm_wrapper.hpp
@@ -0,0 +1,104 @@
+/** \file ppm_wrapper.hpp Interfaces to ppm (fortran) routines
+ */
+#ifndef PPMWRAPPER_HPP
+#define PPMWRAPPER_HPP 
+#include"F2CMangle.hpp"
+#include<mpi.h>
+#include <cstring>
+
+/** Namespace for ppm functions and subroutines */
+
+namespace PPM 
+{
+  
+  extern "C" {
+
+    //F2C_GLOBAL(pass,PASS);
+    // Init and close ppm
+    void F2C_MODULE(ppm_module_init,ppm_init, PPM_MODULE_INIT,PPM_INIT)(int*, int*,int*,int*,int*,int*,int*,int*,int*);
+    void F2C_MODULE(ppm_module_finalize, ppm_finalize,PPM_MODULE_FINALIZE,PPM_FINALIZE)(int&);
+    // Display functions
+    void F2C_MODULE(charfunctions, start, CHARFUNCTIONS, START)(double*,int*,char*,int );
+    void F2C_MODULE(charfunctions, stop, CHARFUNCTIONS, STOP)(double*,int*,char*,int );
+
+    void F2C_MODULE(testppm,mult,TESTPPM,MULT)(double*,double*,int*);
+
+    // Topologies
+    //    void F2C_MODULE(ppm_module_mktopo, ppm_mktopo, PPM_MODULE_MKTOPO,PPM_MKTOPO)(int*, double*, int*,
+    
+    void F2C_MODULE(modtest, cas1, MODTEST,CAS1)(double*);
+    void F2C_MODULE(modtest, cas2, MODTEST,CAS2)(double*);
+    void F2C_MODULE(modtest, cas3, MODTEST,CAS3)(double*);
+    void F2C_MODULE(modtest, cas4, MODTEST,CAS4)(double*);
+    void F2C_MODULE(modtest, cas5, MODTEST,CAS5)(double*);
+    void F2C_MODULE(modtest, cas6, MODTEST,CAS6)(double*);
+    void F2C_MODULE(modtest, application3, MODTEST,APPLICATION3)(); 
+    //    int F2C_MODULE(ppm_module_data,ppm_debug, PPM_MODULE_DATA,PPM_DEBUG);
+  }
+
+
+  /** A static class to call wrapped ppm functions */
+  class wrapper 
+  {
+    
+    static int _info; 
+
+  public : 
+    
+    /** PPM initialization 
+	\param ndim problem dimension
+	\param MK precision for real numbers
+	\param tolexp tolerance exponent
+	\param comm MPI communicator
+	\param debug debug mode value
+	\param info error status
+	\param ppm_log_unit log unit value for io
+	\param err stderr unit value
+	\param out stdout unit value
+    */
+    static void init(int ndim, int MK,int tolexp, MPI::Intracomm& comm, int debug, int* info, int ppm_log_unit, int err, int out)
+    {
+      MPI_Fint Fcomm = MPI_Comm_c2f(comm);
+      F2C_MODULE(ppm_module_init,ppm_init, PPM_MODULE_INIT,PPM_INIT)(&ndim,&MK,&tolexp,&Fcomm,&debug,info,&ppm_log_unit,&err,&out);
+    }
+
+    /** Terminates the ppm library 
+	\param[in,out] info error status
+    */ 
+    static void finalize(int& info)
+    {
+      F2C_MODULE(ppm_module_finalize, ppm_finalize,PPM_MODULE_FINALIZE,PPM_FINALIZE)(info);
+    }
+    
+    /** Wrapper to the ppm substart function
+	@param[in] msg caller name
+	@param[in,out] t0 cpu time when this function is called
+	@param[in,out] info error status
+    */
+    static void substart(std::string& msg, double* t0, int* info)
+    {
+      size_t size = msg.size()+1;
+      char * msgF = new char[size];
+      strncpy(msgF, msg.c_str(),size);
+      F2C_MODULE(charfunctions, start, CHARFUNCTIONS, START)(t0,info,msgF,strlen(msgF));
+      delete[] msgF; // release the temporary Fortran-style buffer
+    }
+    
+    /** Wrapper to the ppm substop function
+	@param[in] msg caller name
+	@param[in] t0 cpu time when substart for this function has been called
+	@param[in,out] info error status
+    */
+    static void substop(std::string& msg, double* t0, int* info)
+    {
+      size_t size = msg.size()+1;
+      char * msgF = new char[size];
+      strncpy(msgF, msg.c_str(),size);      
+      F2C_MODULE(charfunctions, stop, CHARFUNCTIONS, STOP)(t0,info,msgF,strlen(msgF));
+      delete[] msgF; // release the temporary Fortran-style buffer
+    }
+    
+  };
+
+}
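+
+/* Illustrative usage sketch (an assumption, not part of the original header);
+   the argument values mirror those used in callPPM.f90:
+
+     int info = 0;
+     PPM::wrapper::init(3, 8, -15, MPI::COMM_WORLD, 0, &info, 99, 98, 97);
+     // ... calls into ppm ...
+     PPM::wrapper::finalize(info);
+*/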
+
+
+#endif
diff --git a/HySoP/src/interfaces/ppm/wrap_ppm_topologies.f95 b/HySoP/src/interfaces/ppm/wrap_ppm_topologies.f95
new file mode 100644
index 0000000000000000000000000000000000000000..a13e7dc1f34cd0d92762a7a64108f51a825f9472
--- /dev/null
+++ b/HySoP/src/interfaces/ppm/wrap_ppm_topologies.f95
@@ -0,0 +1,54 @@
+!> Fortran 2003 interface to ppm routines related to topologies
+module wrap_ppm_topologies
+
+  use ppm_module_mktopo
+  use WrapFort
+  use ppm_module_data
+
+  implicit none
+  
+contains
+  
+  !> Purely geometry-based decompositions
+  subroutine create_topology_geom(dimPb, topoid, decomp, minPhys, maxPhys, bc, ghostsize) bind(C, name='createTopoG')
+    
+    integer(kind=c_int), intent(in) :: dimPb
+    integer(kind=c_int), intent(inout) :: topoid
+    integer(kind=c_int), intent(inout) :: decomp
+    !integer(kind=c_int), intent(in) :: assig
+    type(c_Ptr), intent(in), VALUE :: minPhys
+    type(c_Ptr), intent(in), VALUE :: maxPhys
+    type(c_Ptr), intent(in), VALUE :: bc
+    real(kind=real_kind), intent(in) :: ghostsize ! ghostsize is a real in ppm subroutine ... strange ...
+!    type(c_ptr), intent(inout) :: costPerProc
+
+    
+    ! Local vars
+    integer :: info
+    !    integer :: nbProcs = 3 ! TODO : input arg
+    integer :: assig = ppm_param_assign_internal
+    real(kind=real_kind), pointer, dimension(:) :: cost => NULL()
+    real(kind=real_kind), pointer, dimension(:) :: min_phys => NULL(), max_phys => NULL()
+    integer, pointer, dimension(:) :: bcdef => NULL()
+    ! Wrap C pointers
+    call c_f_pointer (minPhys, min_phys, (/dimPb/))
+    call c_f_pointer(maxPhys, max_phys, (/dimPb/))
+    call c_f_pointer(bc, bcdef, (/dimPb/))
+    decomp = ppm_param_decomp_cuboid
+
+    ! If cost is already allocated
+ !   if(c_associated(costPerProc)) then
+  !     call c_f_pointer(costPerProc, cost, (/nbProcs/))
+   ! end if
+
+    ! We skip optional vars for the moment ...
+    call ppm_topo_mkgeom_d(topoid, decomp,assig, min_phys, max_phys, bcdef, ghostsize, cost, info)
+    
+    !call aliasF2C(costPerProc, cost, nbProcs)    
+
+    print *, "topoid ", topoid
+
+  end subroutine create_topology_geom
+
+
+end module wrap_ppm_topologies
diff --git a/HySoP/src/io_vtk.f90 b/HySoP/src/io_vtk.f90
new file mode 100755
index 0000000000000000000000000000000000000000..7e89fcf58af45750a8e7e5f20ad2392b4b67f9ae
--- /dev/null
+++ b/HySoP/src/io_vtk.f90
@@ -0,0 +1,192 @@
+!> Tools for vtk output
+!! \todo print a real and proper vtk output ...
+module io_vtk
+
+  use client_data
+  implicit none
+
+contains
+  
+  !> print velocity and vorticity to vtk file 
+  !> 
+  subroutine printToVTK(filename,iter,field1,field2,resolution,spacing,coordMin)
+
+    ! File name prefix for velocity and vorticity outputs
+    character(len=*), intent(in) :: filename
+    ! Current iteration number
+    integer, intent(in) :: iter
+    real(mk), dimension(:,:,:), pointer :: field1, field2
+    !> Number of points in each dir (1st index) for each sub (2nd index)
+    integer, dimension(3),intent(in)  :: resolution
+    real(mk),dimension(3),intent(in)::coordMin
+    real(mk), dimension(3),intent(in) :: spacing
+
+    character(len=60)           :: buffer
+    integer :: nbpoints
+    
+    ! local resolution
+    integer :: nx,ny,nz
+
+    !local filename (depends on subproc number)
+    character(len=30) :: localname,buffer2,localname2
+
+    ! output = nb of iterations
+    write(buffer,*) iter
+    buffer = adjustl(buffer)
+    ! output =  velocity
+!    write(name_vit,'(A)') trim(filename)//"_velocity_"//trim(buffer)//".vtk"
+ !   write(name_omg,'(A)') trim(filename)//"_vorticity_"//trim(buffer)//".vtk"
+
+    !call res_vtk_vit(trim(name_vit))
+    !call res_vtk_omega(trim(name_omg))
+
+    nx = resolution(1)
+    ny = resolution(2)
+    nz = resolution(3)
+    nbpoints = nx*ny*nz
+    write(buffer2, *) rank
+    buffer2 = adjustl(buffer2)
+    localname = trim(filename)//"_velocity_it"//trim(buffer)//"_"//trim(buffer2)//".vti"
+    localname2 = trim(filename)//"_vorticity_it"//trim(buffer)//"_"//trim(buffer2)//".vti"
+    open(unit=11,file=localname,form="formatted")
+    open(unit=12,file=localname2,form="formatted")
+    write(11,'(A26)')"# vtk DataFile Version 3.0"
+    write(11,'(A7)')"vecteur"
+    write(11,'(A5)')"ASCII"
+    write(11,'(A25)')"DATASET STRUCTURED_POINTS"
+    write(11,'(A10,3(i6,1x))')"DIMENSIONS",nx,ny,nz
+    write(11,'(a6,3(f10.5))')"ORIGIN", coordMin
+    write(11,'(A7,3(f10.5))')"SPACING",spacing
+    write(11,'(A10,i10)') "POINT_DATA",nbpoints
+    write(11,'(A21)') "VECTORS velo FLOAT"
+    write(12,'(A26)')"# vtk DataFile Version 3.0"
+    write(12,'(A7)')"vecteur"
+    write(12,'(A5)')"ASCII"
+    write(12,'(A25)')"DATASET STRUCTURED_POINTS"
+    write(12,'(A10,3(i6,1x))')"DIMENSIONS",nx,ny,nz
+    write(12,'(a6,3(f10.5))')"ORIGIN", coordMin
+    write(12,'(A7,3(f10.5))')"SPACING",spacing
+    write(12,'(A10,i10)') "POINT_DATA",nbpoints
+    write(12,'(A21)') "VECTORS vort FLOAT"
+    write(11,'(3(f20.9))') field1(1:nx,1:ny,1:nz)
+    write(12,'(3(f20.9))') field2(1:nx,1:ny,1:nz)
+    close(11)
+    close(12)
+    
+  end subroutine printToVTK
+  
+
+  !> print test function (scalar) values to a vtk file 
+  !> Warning : only for 3D field
+  subroutine printChi3dToVTK(filename,testFunc,resolution,spacing,coordMin)
+    character(len=*), intent(in) :: filename
+    real(mk), dimension(:,:,:), pointer :: testFunc
+    real(mk), dimension(3),intent(in) :: spacing
+    !> Number of points in each dir (1st index) for each sub (2nd index)
+    integer, dimension(3),intent(in)  :: resolution
+    real(mk),dimension(3),intent(in)::coordMin
+ 
+    integer :: nbpoints
+    ! sub proc number
+    ! local resolution
+    integer :: nx,ny,nz
+    
+    !local filename (depends on subproc number)
+    character(len=30) :: localname,buffer
+
+    nx = resolution(1)
+    ny = resolution(2)
+    nz = resolution(3)
+    nbpoints = nx*ny*nz
+    write(buffer, *) rank
+    buffer = adjustl(buffer)
+    localname = trim(filename)//"_chi_"//trim(buffer)//".vti"
+    open(unit=11,file=localname,form="formatted")
+    write(11,'(A26)')"# vtk DataFile Version 3.0"
+    write(11,'(A7)')"scalaire"
+    write(11,'(A5)')"ASCII"
+    write(11,'(A25)')"DATASET STRUCTURED_POINTS"
+    write(11,'(A10,3(i8,1x))')"DIMENSIONS",nx,ny,nz
+    write(11,'(a6,3(f10.5))')"ORIGIN", coordMin
+    write(11,'(A7,3(f10.5))')"SPACING",spacing
+    write(11,'(A10,i10)') "POINT_DATA",nbpoints
+    write(11,'(A21)') "SCALARS chi FLOAT"
+    write(11,'(A21)') "LOOKUP_TABLE default" 
+    write(11,'(f20.9)') testFunc(1:nx,1:ny,1:nz)
+    close(11)
+
+  end subroutine printChi3dToVTK
+  
+  subroutine printScalar3dToVTK(filename,scalar,resolution,spacing,coordMin)
+    character(len=*), intent(in) :: filename
+    real(mk), dimension(:,:,:), pointer :: scalar
+    real(mk), dimension(3),intent(in) :: spacing
+    !> Number of points in each dir (1st index) for each sub (2nd index)
+    integer, dimension(3),intent(in)  :: resolution
+    real(mk),dimension(3),intent(in)::coordMin
+ 
+    integer :: nbpoints
+    ! sub proc number
+    ! local resolution
+    integer :: nx,ny,nz
+    
+    !local filename (depends on subproc number)
+    character(len=30) :: localname,buffer
+
+    nx = resolution(1)
+    ny = resolution(2)
+    nz = resolution(3)
+    nbpoints = nx*ny*nz
+    write(buffer, *) rank
+    buffer = adjustl(buffer)
+    localname = trim(filename)//"_chi_"//trim(buffer)//".vti"
+    open(unit=11,file=localname,form="formatted")
+    write(11,'(A26)')"# vtk DataFile Version 3.0"
+    write(11,'(A7)')"scalaire"
+    write(11,'(A5)')"ASCII"
+    write(11,'(A25)')"DATASET STRUCTURED_POINTS"
+    write(11,'(A10,3(i4,1x))')"DIMENSIONS",nx,ny,nz
+    write(11,'(a6,3(f10.5))')"ORIGIN", coordMin
+    write(11,'(A7,3(f10.5))')"SPACING",spacing
+    write(11,'(A10,i10)') "POINT_DATA",nbpoints
+    write(11,'(A21)') "SCALARS scal FLOAT"
+    write(11,'(A21)') "LOOKUP_TABLE default" 
+    write(11,'(3(f20.9))') scalar(1:nx,1:ny,1:nz)
+    close(11)
+
+  end subroutine printScalar3dToVTK
+
+  subroutine printPvtkFile(filename,spacing,coordMin,coordMax)
+    
+    ! File name prefix for the output
+    character(len=*), intent(in) :: filename
+    real(mk), dimension(3), intent(in) :: spacing
+    real(mk),dimension(3),intent(in) ::coordMin,coordMax
+    character(len=30)::outputname,subfilename
+    real(mk)::x1,x2,y1,y2,z1,z2
+    
+    x1=coordMin(1)
+    x2=coordMax(1)
+    y1=coordMin(2)
+    y2=coordMax(2)
+    z1=coordMin(3)
+    z2=coordMax(3)
+    subfilename="boundaries_chi_0.vti"
+    outputname = trim(filename)//".pvti"
+    open(unit=11,file=outputname,form="formatted")
+    write(11,'(A70)') "<VTKFile type=""PImageData"" version=""0.1"" byte_order=""LittleEndian"">"
+    write(11,'(A25,6(f10.5),A30,3(f10.5),A15,3(f10.5),A3)') "<PImageData WholeExtent=""",x1,x2,y1,y2,z1,z2,&
+         " "" GhostLevel=""2"" Origin=""",coordMin,""" Spacing=""",spacing,""""
+    write(11,'(A30)') "<PPointData Scalars=”Chi”>"
+    write(11,'(A70)') "<PDataArray type=""Float32"" Name=""Chi"" /> "
+    write(11,'(A15)') "</PPointData>"
+    write(11,'(A70)') "<Piece Extent=""x1 x2 y1 y2 z1 z2"" Source=""", subfilename,"""/>"
+
+    write(11,'(A15)') "</PImageData>"
+
+    write(11,'(A11)') "</VTKFile>"
+    
+  end subroutine printPvtkFile
+
+end module io_vtk
+ 
diff --git a/HySoP/src/main/Ctrl b/HySoP/src/main/Ctrl
new file mode 100644
index 0000000000000000000000000000000000000000..608461615dce27312f76ca8f680415ca9454f5c3
--- /dev/null
+++ b/HySoP/src/main/Ctrl
@@ -0,0 +1,61 @@
+#-------------------------------------------------------------------------------
+#		   Philippe, Chatelain, pchatela@inf.ethz.ch
+#		     Michael Bergdorf, bergdorf@inf.ethz.ch
+#			      CSELab, ETH-Zuerich
+#-------------------------------------------------------------------------------
+
+#-------------------------------------------------------------------------------
+#  NAME OF RUN
+#                 
+#=====================================================
+RUNTAG= tubes
+TALK= true
+
+#-------------------------------------------------------------------------------
+#  DOMAIN // COMPUTATIONAL AND PHYSICAL
+#=====================================================
+NX= 65,65,129
+
+#-------------------------------------------------------------------------------
+#  TIME STEPPING
+#=====================================================
+DT= 0.968815
+DT_ADAPT= true
+DTMAX= 10.000000
+TEND= 1000.000000
+ITEND = 10000
+
+#-------------------------------------------------------------------------------
+#  PHYSICAL PARAMETERS
+#=====================================================
+NU= 0.001
+
+#-------------------------------------------------------------------------------
+#  MULTIGRID // MAXLEV must be consistent with NX
+#=====================================================
+MAXLEV    = 6
+
+#-------------------------------------------------------------------------------
+#  INITIAL CONDITION PARAMETERS // rem: none
+#=====================================================
+TRAILING_VORTICES_PRIMARY_SPAN        = 0.7853982
+TRAILING_VORTICES_SECONDARY_SPAN      = 0.403610
+TRAILING_VORTICES_PRIMARY_SIGMA       = 0.222144
+TRAILING_VORTICES_SECONDARY_SIGMA     = 0.10000
+TRAILING_VORTICES_PRIMARY_VERTDIFF    = 0.0
+TRAILING_VORTICES_PRIMARY_GAMMA       = 1.0
+TRAILING_VORTICES_PRIMARY_GAMMA_RATIO = 0.0
+TRAILING_VORTICES_NOISE_SYMMETRY      = true
+TRAILING_VORTICES_NOISE_AMP1        = 0.11
+TRAILING_VORTICES_NOISE_AMP2        = 0.11
+#TRAILING_VORTICES_NOISE_AMP1        = 0.05
+#TRAILING_VORTICES_NOISE_AMP2        = 0.025
+TRAILING_VORTICES_NOISE_THETA1        = 0.785
+TRAILING_VORTICES_NOISE_THETA2        = 0.573710
+
+
+#-------------------------------------------------------------------------------
+#  DUMP EVERY <NDUMP>TH STEP
+#  RUN TIME LIMITATION
+#=====================================================
+NDUMP= 1
diff --git a/HySoP/src/main/Unused_or_obsolet/FieldsComputation.f90 b/HySoP/src/main/Unused_or_obsolet/FieldsComputation.f90
new file mode 100755
index 0000000000000000000000000000000000000000..d6b20e0782e063ceb2cb510128435569aa469870
--- /dev/null
+++ b/HySoP/src/main/Unused_or_obsolet/FieldsComputation.f90
@@ -0,0 +1,83 @@
+!> Some specific routines (i.e. dedicated to one particular simulation ...)
+!! => perturbation of the velocity, velocity offset to match a flow rate ...
+module FieldsComputation
+  
+  use client_data, only: mk,pi
+  use client_topology, only: ppm_t_topo
+  use mpi, only:MPI_COMM_WORLD,MPI_DOUBLE_PRECISION,MPI_SUM,MPI_IN_PLACE
+  implicit none
+
+  private
+
+  public :: fit_velocity, perturb
+
+contains
+ 
+  !> offset velocity to fit with desired flowrate (average)
+  subroutine fit_velocity(vel,lengths,lowerPoint,upperPoint)
+    ! Velocity, intent(inout)
+    real(mk), dimension(:,:,:,:,:), pointer :: vel
+    ! grid step, intent(in)
+    !real(mk), dimension(:), pointer :: stepSize
+    ! dims of the domain, intent(in)
+    real(mk), dimension(:), pointer :: lengths
+    real(mk), dimension(:), pointer :: lowerPoint,upperPoint
+        
+    !real(mk) :: theoretical_flowrate, computed_flowrate,surf
+    real(mk) :: constant
+
+    ! The flow is supposed to be along direction 1
+    ! physical domain surface
+    !surf = (upperPoint(3)-lowerPoint(3))*lengths(2)
+    ! Value of the flow rate we want to fix
+    !theoretical_flowrate = 1.
+    !call computeFlowRate(vel, stepSize, topo,computed_flowrate)
+    ! Correction is added to velocity ...
+    constant = (upperPoint(3)-lowerPoint(3))/lengths(3) !(theoretical_flowrate - computed_flowrate)/surf
+    !    print *, 'const', constant
+    !  print *, computed_flowrate, '//',surf
+    
+    vel(1,:,:,:,1) = vel(1,:,:,:,1) + constant
+
+  end subroutine fit_velocity
+  
+  !> Introduce a small perturbation to velocity
+  subroutine perturb(vel, time)
+    ! Velocity, intent(inout)
+    real(mk), dimension(:,:,:,:,:), pointer :: vel
+    ! current time
+    real(mk) :: time
+
+    vel(2,:,:,:,1) = sin(pi*(time-3.0))
+    
+  end subroutine perturb
+
+  subroutine computeFlowRate(vel, stepSize, topo, flowRate)
+    
+    real(mk), dimension(:,:,:,:,:), pointer :: vel
+    ! grid step, intent(in)
+    real(mk), dimension(:), pointer :: stepSize
+    type(ppm_t_topo), pointer :: topo
+    real(mk), intent(out) :: flowRate
+    
+    integer, dimension(:,:), pointer :: nnodes => NULL()
+    integer :: isub, isubl,info
+    
+    nnodes => topo%mesh(1)%nnodes
+    
+    ! Trapezoidal rule on the x-face: corner nodes weighted 1/4, edge nodes 1/2,
+    ! interior nodes 1.
+    do isub=1,topo%nsublist
+       isubl=topo%isublist(isub)
+       flowRate = vel(1,1,1,1,isub) + vel(1,1,1,nnodes(3,isubl),isub) +&
+         vel(1,1,nnodes(2,isubl),1,isub) + vel(1,1,nnodes(2,isubl),nnodes(3,isubl),isub)
+       flowRate = 0.25*flowRate + 0.5*(sum(vel(1,1,2:nnodes(2,isubl)-1,1,isub)) + &
+            sum(vel(1,1,2:nnodes(2,isubl)-1,nnodes(3,isubl),isub)) &
+            + sum(vel(1,1,1,2:nnodes(3,isubl)-1,isub)) + sum(vel(1,1,nnodes(2,isubl),2:nnodes(3,isubl)-1,isub))) &
+            + sum(vel(1,1,2:nnodes(2,isubl)-1,2:nnodes(3,isubl)-1,isub))
+       flowRate = stepSize(2)*stepSize(3)*flowRate
+    enddo
+    
+    ! MPI forbids aliasing the send and receive buffers; use MPI_IN_PLACE.
+    call MPI_AllReduce(MPI_IN_PLACE, flowRate, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD,info)
+    
+  end subroutine computeFlowRate
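+
+  !> Worked form of the quadrature used in computeFlowRate (sketch):
+  !> \f[ Q \approx \Delta y\,\Delta z \sum_{j,k} w_{jk}\, u_x(1,j,k), \qquad
+  !> w_{jk} = 1/4 \mbox{ at corners},\ 1/2 \mbox{ on edges},\ 1 \mbox{ inside.} \f]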
+
+end module FieldsComputation
diff --git a/HySoP/src/main/unstable/NavierStokes2D.f90 b/HySoP/src/main/unstable/NavierStokes2D.f90
new file mode 100755
index 0000000000000000000000000000000000000000..a2e9b6782af2667a154e366c138de01da100bc9d
--- /dev/null
+++ b/HySoP/src/main/unstable/NavierStokes2D.f90
@@ -0,0 +1,603 @@
+!> This module is used to run a simulation using 
+!! ppm (core and numerics).
+!! Solve Navier-Stokes (vorticity) for a 2D flow around a cylinder.
+!!
+module NavierStokes2D
+  
+  ! All required ppm modules ...
+  use ppm_module_init, only : ppm_init
+  use ppm_module_data, only : ppm_kind_double,ppm_param_bcdef_periodic
+  use ppm_module_finalize, only : ppm_finalize
+  use ppm_module_map_field_ghost, only:ppm_map_field_ghost_get, ppm_map_field_ghost_put
+  use ppm_module_map_field
+  use ppm_module_is_initialized
+  ! use client_io
+  use client_data
+  ! some tools
+  use parmesTools
+  ! Physical domain and grid
+  use Domain
+  ! Fields on the grid
+  use Fields, only: initFields, velocity, vorticity2D, gauss,rhs,scalar!,testFunc!,getMemoryUsedForFields,shiftVelocityX, gauss!vel_ex)!, stream_function
+  use PPMFields
+  ! Topology
+  use client_topology, only: PPMinitTopo,topo,getPPMLocalResolution,meshNum
+  ! Poisson (ppm) solver
+  use Solver, only : init_poisson_solver, solve_poisson, ppm_poisson_drv_none, ppm_poisson_drv_curl_fd2, ppm_poisson_drv_curl_fd4
+  ! Penalisation stuff
+  use penalisation, only : penalise_velocity
+  ! Functions used to identify surfaces, volumes ... 
+  use SetsIndicators,only:chi_sphere,chi_boundary,compute_control_box,init_obstacles,nocaForces,&
+       compute_test,chi_box,getMemoryForIndicators
+  ! curl, prod ...
+  use vectorcalculus
+  ! everything dealing with particles
+  use Particles!, only : initNSSolver_particles,getMemoryUsedForParticles,countAndUpdateParticles,countAndCreateParticles,RK4_2D,&
+  !     createParticlesEverywhere,PPMupdateParticles2D,Ppmremesh2d,RK2_2D
+  ! file io 
+  use io_vtk
+  
+  use mpi
+  
+  ! user-defined functions (for tests or fields initialization)
+  use testsFunctions
+
+  use poisson
+
+!  use solverDiffusion
+
+  implicit none
+
+  ! Some global vars ... 
+  !> counter for memory usage
+  real(mk) :: memoryUsed
+  !> required flow rate (used to shift velocity after Poisson)
+  real(mk) :: reqFlowRate
+  
+contains
+
+  !> All required initialisations :
+  !! MPI, ppm
+  !! Domain, grid
+  !! Fields
+  !! Particles
+  subroutine init_client(Re,localResolution,coordMin)
+
+    use client_topology, only: isubl
+    !> Reynolds number
+    real(mk),intent(in) ::Re
+    !> local mesh resolution
+    integer, dimension(dim3),intent(out) :: localResolution
+    !> Local (proc.) minimum point coordinates
+    real(mk),dimension(dim3),intent(out) ::coordMin
+
+    ! Precision and tolerance (for ppm init)
+    integer :: prec,tol
+    ! MPI comm
+    integer :: comm
+    ! debug mode (for ppm)
+    integer :: debug
+    
+    !> Size of the control box (user defined)
+    real(mk) :: sizeOfTheBox  
+    !> Dimensions and positions of the control volume
+    real(mk),dimension(dime):: boxMin,boxMax
+    
+    !> For ppm fftw solver
+    integer :: derive
+    
+    !> Sphere radius
+    real(mk) :: sphere_radius 
+    !> Sphere position
+    real(mk),dimension(dime) :: sphere_pos
+    !> velocity inf
+    real(mk),parameter :: uinf = 1.0
+    integer :: nbPoints
+
+    !> Thickness of the boundary layer used to enforce Dirichlet BC with penalisation
+    real(mk) :: layer
+
+    logical :: testInit = .False.
+    integer :: info
+
+    ! "read" mpi parameters
+    call MPI_COMM_RANK(MPI_COMM_WORLD,rank,info)
+    call MPI_COMM_SIZE(MPI_COMM_WORLD,nbprocs,info)
+    comm = MPI_COMM_WORLD  
+    
+    write (*,'(a,i5,a)') '[',rank,'] --------------------------------------------------------------> start initialization '
+
+    !======================
+    ! Init ppm 
+    !======================
+    prec = ppm_kind_double 
+    debug = 0
+    tol = -10
+    info = 0 !!! Warning: info MUST be 0 on input, otherwise ppm_init fails silently
+    call ppm_init(dime,prec,tol,comm,debug,info)
+    
+    !======================
+    ! Geometry and grid
+    !======================
+    ! Set domain size, resolution, boundaries, number of ghost points ... 
+    ! At the time, many values are set in Domain.f90 file
+    call init_geometry(ppm_param_bcdef_periodic)
+    call init_grid()
+
+    !======================
+    ! Creates the topology
+    !======================
+    ! Based on ppm. 
+    call PPMinitTopo(physDomainLowerPoint,physDomainUpperPoint,domain_bc,domain_ghostsize,grid_resolution)
+    
+    !> Get the coordinates of the lowest point for the current domain
+    coordMin(:)=topo%min_subd(:,isubl) 
+    coordMin(c_Z) = physDomainUpperPoint(c_Z)
+    !======================
+    ! Fields allocation
+    !======================
+    ! Local number of nodes
+    localResolution = getPPMLocalResolution(topo,meshNum)
+
+    ! Allocate fields on the local mesh (velocity, vorticity ...)
+    call initFields(localResolution,domain_ghostsize)
+    
+    !=======================================================
+    ! Set obstacle (sphere) and other specific parameters.
+    ! Compute a control volume for diagnostics computation
+    !=======================================================
+    !> Set sphere size and position
+!!$    sphere_radius=0.5
+!!$    sphere_pos(c_Y)=(physDomainUpperPoint(c_Y)+physDomainLowerPoint(c_Y))/2.0
+!!$    sphere_pos(c_X) = 0.0
+!!$ !   sphere_pos(c_Z) = 0.0
+!!$    
+!!$    if(rank == 0) then
+!!$       reqFlowRate = requiredFlowRate2D(sphere_radius,domainLength,physDomainLowerPoint,physDomainUpperPoint,uinf)
+!!$    end if
+!!$    call MPI_Bcast(reqFlowRate,1,mpi_mk,0,MPI_COMM_WORLD,info);
+
+    !! We set the size of the box (must be greater than the sphere diameter ...)
+    !    sizeOfTheBox = domainLength(c_Y)-0.3 ! 6*sphere_radius
+    
+    !! Position of the upper and lower points of the box
+    !! Compute the box position on the grid
+    !nbPoints = floor(abs(0.5*(domainLength(c_Z)-sizeOfTheBox)/grid_step(c_Z)))
+    !boxMin=physDomainLowerPoint!+3*grid_step
+    !boxMax=physDomainUpperPoint!-3*grid_step
+!!$    nbPoints = 200
+!!$
+!!$    boxMin = physDomainLowerPoint
+!!$    boxMax = physDomainUpperPoint
+!!$    boxMin(c_Y)=physDomainLowerPoint(c_Y)+ nbPoints*grid_step(c_Y)
+!!$    boxMax(c_Y)=physDomainUpperPoint(c_Y)- nbPoints*grid_step(c_Y)
+!!$    boxMin(c_X)=physDomainLowerPoint(c_X)+ nbPoints*grid_step(c_X)
+!!$    boxMax(c_X)=physDomainUpperPoint(c_X)- (nbPoints+401)*grid_step(c_X)
+!!$    
+    !! compute indicator functions for the control box and the sphere
+  !  call compute_control_box(localResolution,grid_step,boxMin,boxMax,sphere_pos,sphere_radius,coordMin)
+    
+    !====================================================================
+    ! Compute indicator function for the boundaries in z dir 
+    ! (will be used to enforce dirichlet conditions on these boundaries.)
+    !====================================================================
+    layer = 0.0
+   ! call init_obstacles(localResolution,grid_step,physDomainLowerPoint,physDomainUpperPoint,sphere_pos,sphere_radius,layer,coordMin)
+    ! Test : export test function to vtk file.
+    ! compute_test set 1 at all points belonging to chi_... 
+ !   testFunc=0.0
+    !call compute_test(testFunc,chi_boundary)
+    !call compute_test(testFunc,chi_box)
+    !testFunc = 1.0
+    !call printChiToVTK("box",testFunc,localResolution,grid_step,coordMin)
+    !! display the memory used for indicator sets and fields
+ !   memoryUsed=getMemoryUsedForFields()+getMemoryForIndicators()
+!    deallocate(testFunc)
+   
+    !================================
+    ! Solvers (grid and particles) 
+    !================================
+    
+    ! --- Grid solver for Poisson ---
+    ! derive : means that finite difference, order 4, are used to compute velocity from stream function, in ppm
+    !derive = ppm_poisson_drv_curl_fd4 
+    ! init fftw solver, from ppm
+    !call init_poisson_solver(vorticity,velocity,topo%ID,meshNum,derive)
+    
+    ! Initialisation of all the fftw stuff (plans ...).
+    ! Warnings : 
+    ! - this must be done before any initialisation of the fields : plans creation may rewrite data of fieldIn/fieldOut.
+    ! - last argument (resolution) must be the global resolution : dedicated mpi-topologies are created for fftw. 
+    call initFFTW2D(grid_resolution,domainLength)
+    
+    !call diffusionInitFFT(topo%ID,velocity,vorticity,1./Re,domainLength,grid_resolution,physDomainLowerPoint,&
+     !    physDomainUpperPoint,domain_bc)
+    ! --- Particles ---
+    ! Set particles parameters (kernel type, cutoff ...)
+    call initNSSolver_particles()
+    
+    if(verbose) then
+       write(*,'(a,i5,a)') '[',rank,'] ======================================== Summary ========================================'
+       if (rank==0) then
+          write(*,'(a,i5,a,3f10.4)') '[',rank,'] Computational domain (geom) dimensions : ', domainLength
+          write(*,'(a,i5,a,3d20.10)') '[',rank,'] Space step : ', grid_step
+          write(*,'(a,i5,a,f10.4)') '[',rank,'] Reynolds : ', Re
+          write(*,'(a,i5,a,f10.4,3f10.4)') '[',rank,'] Sphere radius and position : ', sphere_radius,sphere_pos
+          write(*,'(a,i5,a,f10.4)') '[',rank,'] The required flow rate is : ', reqFlowRate
+          write(*,'(a,i5,a,i5)') '[',rank,'] Number of points in the boundary layer: ', nbpoints
+       end if
+       write(*,'(a,i5,a,3f10.5)') '[',rank,'] Current subdomain (geom) position : ', coordMin
+       write(*,'(a,i5,a,3i10)') '[',rank,'] Current subdomain resolution : ', localResolution
+       write(*,'(a,i5,a)') '[',rank,'] ========================================================================================='
+    end if
+    
+    write (*,'(a,i5,a)') '[',rank,'] --------------------------------------------------------------> end of initialization'
+
+  end subroutine init_client
+ 
+  subroutine main_client() bind(c,name='NavierStokes2D')
+
+    real(mk) :: initial_time,final_time
+    real(mk) :: Re
+    real(mk) :: t1
+    real(mk), dimension(dim3) :: coordMin
+    !> local mesh resolution
+    integer, dimension(dim3) :: localResolution
+
+    real(mk) :: shift = 0.0
+    real(mk),dimension(dime)::coord
+    integer :: i,info,j
+    character(len=30) :: buffer
+    real(mk),dimension(dime)::center
+    Re = 133.4! 210.6
+    initial_time = 0.0
+    final_time = 100.0
+   ! Initialisation stuff 
+    t1 = MPI_WTIME()
+    call init_client(Re,localResolution,coordMin)
+    write (*,'(a,i5,a,f10.5)')  '[',rank,'] Initialisation time: ', MPI_WTIME()-t1
+
+    
+    do j=1,localResolution(c_Y)
+       coord(c_Y) = coordMin(c_Y) + (j-1)*grid_step(c_Y)
+       do i =1,localResolution(c_X)
+          coord(c_X) = coordMin(c_X) + (i-1)*grid_step(c_X)
+          vorticity2D(i,j,1) = -4.*pi*pi*cos(2.*pi*coord(c_X)/domainLength(c_X))/(domainLength(c_X)**2)! + cos(2.*pi*coord(c_Y)/domainLength(c_Y)) 
+       end do
+    end do
+
+    PPMvelocity2D(c_Y,:,:,:) = 0.0
+    !rhs(:,:,:,:) = velocity(:,:,:,:)
+    !vorticity2D(:,:,:) = 1.0!velocity(:,:,:,:)
+    !vorticity2D(1:localResolution(c_X)-1,1:localResolution(c_Y)-1,1) = 0.0
+    !print *, "N2",norm2(velocity,localResolution,grid_step)!)velocity(c_X,:,:,:)
+    !print *, "N2 v", norm2(vorticity2D,localResolution,grid_step)!)vorticity(c_X,:,:,:)
+    call solvePoisson(vorticity2D,velocity,grid_resolution,topo%ID,meshNum)
+    print *, "end of poisson solve"
+!!$    call MPI_BARRIER(MPI_COMM_WORLD,info)
+!!$
+!!$    print *, "post N2",norm2(velocity,localResolution,grid_step)!)velocity(c_X,:,:,:)
+!!$    !print *, "post N2 v", norm2(vorticity2D,localResolution,grid_step)!)vorticity(c_X,:,:,:)
+!!$    do i = 1, localResolution(c_X)
+!!$       print *,"------------", velocity(c_X,i,:,1)
+!!$       print *,"************", vorticity2D(i,:,1)
+!!$    end do
+!!$
+!!$
+    
+    !vorticity2D = vorticity - rhs
+    !print *, "results :", norm1(vorticity,localResolution,grid_step)
+
+
+
+
+!    call Mpi_barrier(MPI_COMM_WORLD,info)
+
+
+    write(buffer,*) rank
+    buffer = adjustl(buffer)
+    buffer = "gauss"//buffer
+    open(43,file=buffer) ! one output file per process (name contains the rank)
+    do j=1,localResolution(c_Y)
+       coord(c_Y) = coordMin(c_Y) + (j-1)*grid_step(c_Y)
+    !   do i =1,localResolution(c_X)
+          !coord(c_X) = coordMin(c_X) + (i-1)*grid_step(c_X)
+       write(43,'(4e14.5)') coord(c_Y),velocity(c_X,5,j,1) 
+     !  end do
+    end do
+    
+   close(43)
+!!$    shift = 0!-500.*1e-3
+!!$    ! init a Gaussian
+!!$    center = 0.0
+!!$    !call Gaussian1D(scalar,localResolution,grid_step,coordMin,c_X,shift)
+!!$!!    call Gaussian2D(scalar,localResolution,grid_step,coordMin,center)
+!!$    center(c_X) = 100.*1e-3
+    !shift = 100.*1e-3
+    !    call Gaussian1D(gauss,localResolution,grid_step,coordMin,c_X,shift)
+!!$    call Gaussian2D(gauss,localResolution,grid_step,coordMin,center)
+!!$    call ppm_map_field_ghost_get(topo%ID,meshNum,domain_ghostsize,info)
+!!$    call ppm_map_field_push(topo%ID,meshNum,PPMscalar2D,info)
+!!$    call ppm_map_field_send(info)
+!!$    call ppm_map_field_pop(topo%ID,meshNum,PPMscalar2D,domain_ghostsize,info)
+    ! Time loop 
+!    call timeLoop(initial_time,final_time,Re,localResolution,coordMin)
+!!$    rhs = 0.0
+!!$    rhs(1,:,:,:) = gauss - scalar
+!!$    
+!!$    print *, rank,"norms ", norm1(rhs,localResolution,grid_step),norm2(rhs,localResolution,grid_step),normInf(rhs,localResolution)
+!!$    !print *, "jzaiziaui", rank,rhs(1,65,12,1),rhs(1,1,12,1)
+!!$    !do j = 1, localResolution(c_Y)
+!!$    j = 43
+!!$       coord(c_Y) =coordMin(c_Y) + (j-1)*grid_step(c_Y) 
+!!$       do i = 1,localResolution(c_X)
+!!$          coord(c_X) = coordMin(c_X) + (i-1)*grid_step(c_X)
+!!$          if(gauss(i,j,1)< 1e-5) gauss(i,j,1)=0.0
+!!$          if(scalar(i,j,1)< 1e-5) scalar(i,j,1)=0.0
+!!$          write(43,'(4e14.5)') coord(c_X),coord(c_Y),real(gauss(i,j,1)),real(PPMscalar2D(i,j,1))
+!!$       end do
+    !end do
+
+!!$    print *,"max diff", maxloc(rhs),maxval(rhs)
+!!$    close(43)
+    !call printToVTK("gauss",0,gauss,vorticity,localResolution,grid_step,coordMin)
+    
+    ! Close everything related to ppm
+    call ppm_finalize(info)
+    write (*,'(a,i5,a)')  '[',rank,'] ==================================== End of simulation. ===================================='
+
+  end subroutine main_client
+
+
+  !> Simulation over time ...
+  subroutine timeLoop(initial_time,final_time,Re,resolution,coordMin)
+
+    !> Starting time
+    real(mk), intent(in) :: initial_time
+    !> Ending time
+    real(mk), intent(in) :: final_time
+    !> Reynolds number
+    real(mk), intent(in) :: Re
+    !> Local mesh resolution
+    integer, dimension(dim3) :: resolution
+    !> coordinates of the lowest point of the domain
+    real(mk),dimension(dim3),intent(in) :: coordMin
+
+    real(mk) :: current_time,elapsed_time,time_step
+    integer :: iter,i,j,k
+    logical :: resetpos
+    real(mk) :: nu,dtMax
+    !    real(mk), dimension(:,:), pointer :: err1, err2, errinf
+    ! diagnostics 1:3 -> drag/lifts "porous" drag, 4:6 Winkelmans paper method
+    !    real(mk), dimension(dime) :: diagnostics 
+    real(mk),dimension(dime)::force
+
+    real(mk)::dvol,x,z,y
+    real(mk),dimension(dime) :: norm2Vel,nref
+    integer :: info
+    integer,parameter :: maxiter = 1
+    dvol=product(grid_step)
+    nu = 1./Re!0.5*FolkeRatio/Re!*uinf
+    current_time = initial_time
+    iter = 1
+    
+    ! Max value allowed for the time step
+    dtMax = 1e-3!0.01!grid_step(1)**2/(6.*nu)
+    print *, "dtmax", dtMax
+    ! offset for velocity to set a desired average value for flow rate.
+    ! Initial time step
+    time_step=dtMax
+
+    if(rank==0) open(10,file='diagnostics') ! Output only for one process
+    
+    !! Compute a first distribution of velocity and vorticity
+    !call computeInitialFields(time_step,resolution,grid_step,coordMin)
+!
+    ! Synchronise all procs after initialization process ...
+    call MPI_BARRIER(MPI_COMM_WORLD,info)
+    write (*,'(a,i5,a)')  '[',rank,'] -------------------------------------------------------------->  start simulation'
+    
+    resetpos = .TRUE.
+    !current_time=final_time
+    !do while(current_time <= final_time)
+    do while(iter <= maxiter)
+       elapsed_time = MPI_Wtime()
+       ! Perturbation step, only required for high Reynolds number 
+       ! if( current_time>3.0 .and. current_time<4.0) then
+       !   call perturb(velocity, current_time)
+       ! end if
+       
+       !============================================================ 
+       ! Compute the diagnostics, Noca method
+       !============================================================ 
+       !call nocaForces(force,velocity,vorticity,nu,coordMin,grid_step,time_step,dvol)
+       
+       if(rank == 0) then
+          write(*,'(i5,a,3f10.5)') iter, ' drag: ', force
+          write(10,'(11e14.5)') current_time,force
+       end if
+
+       !============================================================ 
+       ! Solve Navier-Stokes using particular method
+       !============================================================ 
+       call PPMupdateParticles2DScalar(PPMscalar2D,.true.,topo%ID,meshNum,PPMvelocity2D)
+       call RK2_2DScalar(time_step,topo%ID,meshNum,domain_ghostsize,PPMvelocity2D)
+       call PPMremesh2DScalar(topo%ID,meshNum,domain_ghostsize,PPMscalar2D)
+         
+       !============================================================ 
+       ! Solve Poisson for the new vorticity --> velocity
+       !============================================================ 
+       ! Compute velocity from vorticity
+       ! Two steps:
+       ! - solve Poisson for stream_function and update velocity from stream_function : everything is supposed
+       ! to be done in ppm routine; indeed we do not have to deal with stream_function.
+       ! - update velocity to fit with a required flow rate
+       ! Solve poisson to find velocity 
+!!$       call solve_poisson(vorticity,velocity,topo%ID,meshNum,domain_ghostsize)
+!!$       call ppm_map_field_ghost_get(topo%ID,meshNum,domain_ghostsize,info)
+!!$       call ppm_map_field_push(topo%ID,meshNum,velocity,3,info)
+!!$       call ppm_map_field_send(info)
+!!$       call ppm_map_field_pop(topo%ID,meshNum,velocity,3,domain_ghostsize,info)
+!!$       print *, "POST poisson", minval(vorticity),maxval(vorticity),minval(velocity),maxval(velocity)
+!!$       print *, 'veloc', sum(velocity(c_X,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(velocity(c_Y,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(velocity(c_Z,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1))
+!!$       print *, 'vort',sum(vorticity(c_X,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(vorticity(c_Y,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(vorticity(c_Z,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1))
+
+!!$
+!!$       call shiftVelocityX(reqFlowRate,grid_step,resolution,domainLength(c_Y)*domainLength(c_Z),coordMin,physDomainLowerPoint)
+!!$       call ppm_map_field_ghost_get(topo%ID,meshNum,domain_ghostsize,info)
+!!$       call ppm_map_field_push(topo%ID,meshNum,velocity, 3, info)
+!!$       call ppm_map_field_send(info)
+!!$       call ppm_map_field_pop(topo%ID,meshNum,velocity,3,domain_ghostsize,info)
+
+!!$       print *, "POST shift", minval(vorticity),maxval(vorticity),minval(velocity),maxval(velocity)
+!!$       print *, 'veloc', sum(velocity(c_X,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(velocity(c_Y,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(velocity(c_Z,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1))
+!!$       print *, 'vort',sum(vorticity(c_X,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(vorticity(c_Y,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(vorticity(c_Z,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1))
+
+       !============================================================ 
+       ! Penalize velocity on the sphere and on the boundaries
+       !============================================================ 
+!!$       call penalise_velocity(velocity,time_step,chi_sphere,chi_boundary)
+!!$
+!!$       call ppm_map_field_ghost_get(topo%ID,meshNum,domain_ghostsize,info)
+!!$       call ppm_map_field_push(topo%ID,meshNum,velocity, 3, info)
+!!$       call ppm_map_field_send(info)
+!!$       call ppm_map_field_pop(topo%ID,meshNum,velocity,3,domain_ghostsize,info)
+!!$       
+!!$       print *, "POST penal", minval(vorticity),maxval(vorticity),minval(velocity),maxval(velocity)
+!!$       print *, 'veloc', sum(velocity(c_X,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(velocity(c_Y,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(velocity(c_Z,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1))
+!       call printToVTK("post-penal",iter,velocity,vorticity,resolution,grid_step,coordMin)
+       
+       
+       !============================================================ 
+       ! Compute the new "penalized" vorticity
+       !============================================================ 
+!!$       call curlDF4(velocity,vorticity,resolution,grid_step)
+!!$       call ppm_map_field_ghost_get(topo%ID,meshNum,domain_ghostsize,info)
+!!$       call ppm_map_field_push(topo%ID,meshNum,vorticity,3,info)
+!!$       call ppm_map_field_send(info)
+!!$       call ppm_map_field_pop(topo%ID,meshNum,vorticity,3,domain_ghostsize,info)
+!!$       
+!!$
+       !============================================================ 
+       ! Compute stretch/diffusion from current velocity/vorticity
+       !============================================================ 
+!!$       call computeRHS(velocity,vorticity,rhs,resolution,grid_step,nu)
+!!$       !!call computeStretch(velocity,vorticity,rhs,resolution,grid_step)
+!!$       call ppm_map_field_ghost_get(topo%ID,meshNum,domain_ghostsize,info)
+!!$       call ppm_map_field_push(topo%ID,meshNum,rhs,3,info)
+!!$       call ppm_map_field_send(info)
+!!$       call ppm_map_field_pop(topo%ID,meshNum,rhs,3,domain_ghostsize,info)
+       
+!!$       print *, 'veloc', sum(velocity(c_X,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(velocity(c_Y,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(velocity(c_Z,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1))
+!!$       print *, 'vort',sum(vorticity(c_X,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(vorticity(c_Y,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(vorticity(c_Z,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1))
+!!$       print *, 'stretch',sum(rhs(c_X,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(rhs(c_Y,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1)),&
+!!$            sum(rhs(c_Z,1:resolution(c_X)-1,1:resolution(c_Y)-1,1:resolution(c_Z)-1,nsublist))&
+!!$            /((resolution(c_X)-1)*(resolution(c_Y)-1)*(resolution(c_Z)-1))
+!!$       print *, 'max v...', maxval(velocity(c_X,:,:,:,:)), maxval(velocity(c_Y,:,:,:,:)), maxval(velocity(c_Z,:,:,:,:))
+!!$       print *, 'min v...', minval(velocity(c_X,:,:,:,:)), minval(velocity(c_Y,:,:,:,:)), minval(velocity(c_Z,:,:,:,:))
+!!$       print *, 'max w...', maxval(vorticity(c_X,:,:,:,:)), maxval(vorticity(c_Y,:,:,:,:)), maxval(vorticity(c_Z,:,:,:,:))
+!!$       print *, 'min w...', minval(vorticity(c_X,:,:,:,:)), minval(vorticity(c_Y,:,:,:,:)), minval(vorticity(c_Z,:,:,:,:))
+!!$       print *, 'max s...', maxval(rhs(c_X,:,:,:,:)), maxval(rhs(c_Y,:,:,:,:)), maxval(rhs(c_Z,:,:,:,:))
+!!$       print *, 'min s...', minval(rhs(c_X,:,:,:,:)), minval(rhs(c_Y,:,:,:,:)), minval(rhs(c_Z,:,:,:,:))
+
+!!$       print *, 'indices ...', maxloc(vorticity(:,:,:,:,:)),'toto',minloc(vorticity(c_Y,:,:,:,:))
+!!$
+!!$
+!!$       print *, 'vort', vorticity(c_Y,-1,-1,4,1)
+!!$       print *, 'vel', velocity(c_X,1,1,4,1), velocity(c_X,1,1,128,1)
+!!$       print *, 'vel +1', velocity(c_X,1,1,5,1), velocity(c_X,1,1,129,1)
+!!$       print *, 'vel', velocity(c_X,1,1,6,1), velocity(c_X,1,1,130,1)
+!!$       print *, 'vel -1', velocity(c_X,1,1,3,1), velocity(c_X,1,1,127,1)
+!!$       print *, 'vel', velocity(c_X,1,1,2,1), velocity(c_X,1,1,126,1)
+ 
+       
+       ! Update time
+       current_time = current_time + time_step
+       
+       if(verbose) then 
+          !memoryUsed = memoryUsed + getMemoryUsedForParticles()
+          write(*,'(a,i5,a,i10)') "[", rank,"] end of iter ",iter
+          write(*,'(a,i5,a,f10.4)')  "[", rank,"] simulation time : ", MPI_WTime()-elapsed_time
+          write(*,'(a,i5,a,f14.10)')  "[", rank,"] current time step : ", time_step
+          write(*,'(a,i5,a,f10.4)')  "[", rank,"] Memory used : ",  memoryUsed
+          write(*,'(a,i5,a,f14.10)')  "[", rank,"] current time : ", current_time
+       end if
+
+       ! Compute the new time step according to vorticity maximum value
+       !    call updateTimeStep(time_step,vorticity,dtMax)
+       
+       ! Output every 20 time units ...
+       ! if(mod(current_time,20.).lt.time_step) then
+       !    call printToVTK("run",iter,velocity,vorticity,resolution,grid_step,coordMin)
+       ! end if
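+       ! (the test fires on the first step after current_time crosses a multiple of 20:
+       !  e.g. current_time = 20.0008 with time_step = 2.2e-3 gives
+       !  mod(current_time,20.) = 0.0008 < time_step)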
+       
+       iter = iter+1
+       !call MPI_BARRIER(MPI_COMM_WORLD,info)
+    enddo
+    
+    if(rank==0) close(10)
+
+  end subroutine timeLoop
+
+  subroutine updateTimeStep(dt,ref_field,dtMax)
+    
+    real(mk), dimension(:,:,:,:,:), pointer :: ref_field
+    real(mk), intent(inout) :: dt
+    real(mk),intent(in)::dtMax
+
+    real(mk) :: local_max,omega_max
+    integer :: info
+
+    local_max = maxval(ref_field(:,:,:,:,nsublist)) ! We ignore the ppm nsubs stuff ...
+    call MPI_ALLReduce(local_max,omega_max,1,MPI_DOUBLE_PRECISION,MPI_MAX,MPI_COMM_WORLD,info)
+    dt = min(0.25/omega_max,dtMax)
+    
+  end subroutine updateTimeStep
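+
+  ! Hedged numeric illustration of the bound above (assumed values): if the global
+  ! vorticity maximum is 50, then 0.25/omega_max = 5.0e-3; with dtMax = 2.2e-3 the
+  ! diffusive limit wins and dt = min(5.0e-3, 2.2e-3) = 2.2e-3.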
+
+end module NavierStokes2D
+
diff --git a/HySoP/src/main/unstable/NavierStokes3D.f90 b/HySoP/src/main/unstable/NavierStokes3D.f90
new file mode 100755
index 0000000000000000000000000000000000000000..20be7b68fa73438cacbb30dbe649c5731caa4c61
--- /dev/null
+++ b/HySoP/src/main/unstable/NavierStokes3D.f90
@@ -0,0 +1,444 @@
+!> This module is used to run a simulation using 
+!! ppm (core and numerics)
+!!
+module ppmNavierStokes3D
+  
+  ! All required ppm modules ...
+  use ppm_module_init, only : ppm_init
+  use ppm_module_data, only : ppm_kind_double
+  use ppm_module_finalize, only : ppm_finalize
+  use ppm_module_map_field_ghost, only:ppm_map_field_ghost_get, ppm_map_field_ghost_put
+  use ppm_module_map_field
+  use ppm_module_data, only: ppm_param_bcdef_periodic 
+
+  ! use client_io
+  use client_data
+  ! some tools
+  use parmesTools
+  ! Physical domain and grid
+  use Domain
+  ! Fields on the grid
+  use Fields, only: init_fields, velocity, vorticity, rhs,testFunc,getMemoryUsedForFields!, vel_ex!, stream_function
+  ! Topology
+  use client_topology, only: init_topo,topo,getPPMLocalResolution
+  ! Poisson (ppm) solver
+  use Solver, only : init_poisson_solver, solve_poisson, ppm_poisson_drv_none, ppm_poisson_drv_curl_fd2, ppm_poisson_drv_curl_fd4
+  ! Penalisation stuff
+  use penalisation, only : penalise_velocity
+  ! Functions used to identify surfaces, volumes ... 
+  use SetsIndicators,only:chi_sphere,chi_boundary,compute_control_box,init_boundary_layer,nocaForces,&
+       compute_test,chi_box,getMemoryForIndicators
+  ! curl, prod ...
+  use vectorcalculus, only: curldf4, computeRHS, norm1, norm2, normInf
+  ! everything dealing with particles
+  use Particles, only : init_particles,update_particles,push_particles,remesh,getMemoryUsedForParticles
+  ! file io 
+  use io_vtk
+  
+  use mpi
+  
+  ! user-defined functions (for tests or fields initialization)
+  use testsFunctions
+
+  implicit none
+
+  integer, private :: info 
+
+  ! Some global vars ... 
+  !> Sphere radius
+  real(mk),private :: sphere_radius
+  !> Sphere position
+  real(mk),dimension(dime),private :: sphere_pos
+  !> counter for memory usage
+  real(mk) :: memoryUsed
+  
+contains
+
+  !> All required initialisations :
+  !! MPI, ppm
+  !! Domain, grid
+  !! Fields
+  !! Particles
+  subroutine init_client(Re,localResolution,coordMin)
+
+    use client_topology, only: isubl
+    !> Reynolds number
+    real(mk),intent(in) ::Re
+    !> local mesh resolution
+    integer, dimension(dime),intent(out) :: localResolution
+    !> Local (proc.) minimum point coordinates
+    real(mk),dimension(dime),intent(out) ::coordMin
+
+    ! Precision and tolerance (for ppm init)
+    integer :: prec,tol
+    ! MPI comm
+    integer :: comm
+    ! debug mode (for ppm)
+    integer :: debug
+    
+
+    !> Dimensions and positions of the control volume
+    real(mk),dimension(dime):: boxMin,boxMax
+
+    !> For ppm fftw solver
+    integer :: derive
+
+    memoryUsed = 0
+
+    ! "read" mpi parameters
+    call MPI_COMM_RANK(MPI_COMM_WORLD,rank,info)
+    call MPI_COMM_SIZE(MPI_COMM_WORLD,nbprocs,info)
+    comm = MPI_COMM_WORLD  
+    
+    write (*,'(a,i5,a)') '[',rank,'] --------------------------------------------------------------> start initialization '
+
+    !======================
+    ! Init ppm 
+    !======================
+    prec = ppm_kind_double ! Defined in ppm_param.h
+    debug = 0
+    tol = -10
+    info = -1
+    call ppm_init(dime,prec,tol,comm,debug,info)
+    
+    !======================
+    ! Geometry and grid
+    !======================
+    ! Set domain size, resolution, boundaries, number of ghost points ... 
+    ! For the time being, most values are hard-coded in the Domain.f90 file
+    call init_geometry(ppm_param_bcdef_periodic)
+    call init_grid()
+
+    !======================
+    ! Creates the topology
+    !======================
+    ! Based on ppm. 
+    call init_topo(falseLowerPoint,falseUpperPoint,domain_bc,domain_ghostsize,grid_resolution)
+    
+    !> Get the coordinates of the lowest point for the current domain
+    coordMin(:)=topo%min_subd(:,isubl) 
+
+    !======================
+    ! Fields allocation
+    !======================
+    ! Local number of nodes
+    localResolution = getPPMLocalResolution()
+    ! Allocate fields (velocity, vorticity ...)
+    call init_fields(localResolution,domain_ghostsize)
+    
+    !=======================================================
+    ! Compute a control volume for diagnostics computation
+    !=======================================================
+    !! Position of the upper and lower points of the box
+    boxMin = falseLowerPoint ! alternative: offset inward, e.g. by 15*grid_step
+    boxMax = falseUpperPoint ! alternative: offset inward, e.g. by 15*grid_step
+    !print *, rank,'sizes',boxMax-boxMin
+    !print *, rank, 'sx', 0.5*(boxMax(3)-boxMin(3))*(boxMax(2)-boxMin(2)),(boxMax(3)-boxMin(3))*(boxMax(2)-boxMin(2))
+    !print *, rank, 'sy', 0.5*(boxMax(3)-boxMin(3))*(boxMax(1)-boxMin(1)),(boxMax(3)-boxMin(3))*(boxMax(1)-boxMin(1))
+    !print *, rank, 'sz', 0.5*(boxMax(1)-boxMin(1))*(boxMax(2)-boxMin(2)),(boxMax(1)-boxMin(1))*(boxMax(2)-boxMin(2))
+    !! Sphere radius and position
+    sphere_radius=0.5
+    sphere_pos=(upperPoint+lowerPoint)/2.0
+
+    !! compute indicator functions for the control box and the sphere
+    call compute_control_box(localResolution,grid_step,boxMin,boxMax,sphere_pos,sphere_radius,coordMin)
+    !! Check: the indicator equals 1 inside the box (excluding the sphere) and 0 elsewhere
+    
+    !=============================================================
+    ! Compute indicator function for the boundaries in the z direction
+    ! (used to enforce Dirichlet conditions on these boundaries)
+    !=============================================================
+    call init_boundary_layer(localResolution,grid_step,lowerPoint,upperPoint,coordMin)
+
+    ! Test : export test function to vtk file.
+    ! compute_test sets testFunc to 1 at all points belonging to the given indicator set
+    testFunc=0.0
+    call compute_test(testFunc,chi_boundary)
+    call compute_test(testFunc,chi_box)
+    call printChiToVTK("box",testFunc,localResolution,grid_step,coordMin)
+    !! display the memory used for indicator sets and fields
+    memoryUsed=getMemoryUsedForFields()+getMemoryForIndicators()
+    deallocate(testFunc)
+   
+    !================================
+    ! Solvers (grid and particles) 
+    !================================
+    
+    ! --- Grid solver for Poisson ---
+    ! derive: fourth-order finite differences are used (inside ppm) to compute velocity from the stream function
+    derive = ppm_poisson_drv_curl_fd4 
+    ! init fftw solver, from ppm
+    call init_poisson_solver(vorticity,velocity,topo%ID,topo%mesh(1)%ID,derive)
+    
+    ! --- Particles ---
+    ! Set particles parameters (kernel type, cutoff ...)
+    call init_particles()
+      
+    if(verbose) then
+       write(*,'(a,i5,a)') '[',rank,'] ======================================== Summary ========================================'
+       if (rank==0) then
+          write(*,'(a,i5,a,3f10.4)') '[',rank,'] Computational domain (geom) dimensions : ', lengths
+          write(*,'(a,i5,a,3d20.10)') '[',rank,'] Space step : ', grid_step
+          write(*,'(a,i5,a,f10.4)') '[',rank,'] Reynolds : ', Re
+       end if
+       write(*,'(a,i5,a,3f10.5)') '[',rank,'] Current subdomain (geom) position : ', coordMin
+       write(*,'(a,i5,a,3i10)') '[',rank,'] Current subdomain resolution : ', localResolution
+       write(*,'(a,i5,a)') '[',rank,'] ========================================================================================='
+    end if
+    
+    write (*,'(a,i5,a)') '[',rank,'] --------------------------------------------------------------> end of initialization'
+
+  end subroutine init_client
+  
+  subroutine computeInitialFields(fit_velo,dt,resolution,step,coordMin)
+    !> offset value for velocity to fit with a given flow rate
+    real(mk),intent(in) :: fit_velo
+    !> current time step
+    real(mk),intent(in) ::dt
+    !> local mesh resolution
+    integer,dimension(dime),intent(in) :: resolution
+    !> mesh step sizes
+    real(mk),dimension(dime),intent(in)::step
+    !> Local (proc.) minimum point coordinates
+    real(mk),dimension(dime),intent(in) ::coordMin
+    ! set velocity to 0. and compute a full vorticity distribution, such that
+    ! Omega_y = -3z/(Lz^2)
+    velocity = 0.0
+    call init_vorticity(vorticity,resolution,step,coordMin,lowerPoint,upperPoint)
+    ! Init ghost values for vorticity
+    call ppm_map_field_ghost_get(topo%ID,topo%mesh(1)%ID,domain_ghostsize,info)
+    call ppm_map_field_push(topo%ID,topo%mesh(1)%ID,vorticity, 3, info)
+    call ppm_map_field_send(info)
+    call ppm_map_field_pop(topo%ID,topo%mesh(1)%ID,vorticity,3,domain_ghostsize,info)
+    ! Solve poisson for velocity
+    call solve_poisson(vorticity,velocity,topo%ID,topo%mesh(1)%ID,domain_ghostsize)
+    velocity(c_X,:,:,:,:) = velocity(c_X,:,:,:,:) + fit_velo
+    ! Penalize to take sphere and boundaries into account
+    call penalise_velocity(velocity,dt,chi_sphere,chi_boundary)
+    call ppm_map_field_ghost_get(topo%ID,topo%mesh(1)%ID,domain_ghostsize,info)
+    call ppm_map_field_push(topo%ID,topo%mesh(1)%ID,velocity, 3, info)
+    call ppm_map_field_send(info)
+    call ppm_map_field_pop(topo%ID,topo%mesh(1)%ID,velocity,3,domain_ghostsize,info)
+    ! Compute and map the new "penalized" vorticity
+    call curlDF4(velocity,vorticity,resolution,step)
+    call ppm_map_field_ghost_get(topo%ID,topo%mesh(1)%ID,domain_ghostsize,info)
+    call ppm_map_field_push(topo%ID,topo%mesh(1)%ID,vorticity,3,info)
+    call ppm_map_field_send(info)
+    call ppm_map_field_pop(topo%ID,topo%mesh(1)%ID,vorticity,3,domain_ghostsize,info)
+    
+  end subroutine computeInitialFields
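+
+  !> Hedged sketch (not called above): the get/push/send/pop ghost-update sequence
+  !! recurs throughout this module; a hypothetical helper like this one could factor it.
+  subroutine updateGhosts3D(field,topoid,meshid,ghostsize,lda)
+    !> field in PPM-style storage (e.g. PPMvelocity3D)
+    real(mk), dimension(:,:,:,:,:), pointer :: field
+    !> topo and mesh ids, number of field components
+    integer, intent(in) :: topoid,meshid,lda
+    !> number of ghost points in each direction
+    integer, dimension(:), pointer :: ghostsize
+    call ppm_map_field_ghost_get(topoid,meshid,ghostsize,info)
+    call ppm_map_field_push(topoid,meshid,field,lda,info)
+    call ppm_map_field_send(info)
+    call ppm_map_field_pop(topoid,meshid,field,lda,ghostsize,info)
+  end subroutine updateGhosts3D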
+
+  subroutine main_client() bind(c,name='NavierStokes3D')
+
+    real(mk) :: initial_time,final_time
+    real(mk) :: Re
+    real(mk) :: t1
+    real(mk), dimension(dime) :: coordMin
+    !> local mesh resolution
+    integer, dimension(dime) :: localResolution
+    Re = 133.4 ! alternative value: 210.6
+    initial_time = 0.0
+    final_time = initial_time ! alternative: 20.
+    
+    ! Initialisation stuff 
+    t1 = MPI_WTIME()
+    call init_client(Re,localResolution,coordMin)
+    write (*,'(a,i5,a,f10.5)')  '[',rank,'] Initialisation time: ', MPI_WTIME()-t1
+    
+    ! Time loop 
+    call timeLoop(initial_time,final_time,Re,localResolution,coordMin)
+
+    ! Close everything related to ppm
+    call ppm_finalize(info)
+    write (*,'(a,i5,a)')  '[',rank,'] ================== End of simulation. =================='
+
+  end subroutine main_client
+
+
+  !> Simulation over time ...
+  subroutine timeLoop(initial_time,final_time,Re,resolution,coordMin)
+
+    !> Starting time
+    real(mk), intent(in) :: initial_time
+    !> Ending time
+    real(mk), intent(in) :: final_time
+    !> Reynolds number
+    real(mk), intent(in) :: Re
+    !> Local mesh resolution
+    integer, dimension(dime), intent(in) :: resolution
+    !> coordinates of the lowest point of the domain
+    real(mk),dimension(dime),intent(in) :: coordMin
+
+    real(mk) :: current_time,elapsed_time,time_step
+    integer :: iter
+    logical :: resetpos
+    real(mk) :: fit_velo,nu,dtMax
+    !    real(mk), dimension(:,:), pointer :: err1, err2, errinf
+    ! diagnostics 1:3 -> drag/lift from the "porous" method, 4:6 -> method from Winkelmans' paper
+    !    real(mk), dimension(dime) :: diagnostics 
+    real(mk),dimension(dime)::force
+
+    real(mk)::dvol
+
+    dvol=product(grid_step)
+    nu =1./Re
+    current_time = initial_time
+    iter = 1
+
+    ! Max value allowed for the time step: explicit diffusion stability requires
+    ! dt <= h**2/(6*nu) in 3D
+    dtMax = grid_step(1)**2/(6.*nu)
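+    ! Hedged example (assumed values): with grid_step(1) = 0.01 and Re = 133.4,
+    ! dtMax = 1.0e-4/(6./133.4) ~ 2.2e-3.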
+    
+    ! offset for velocity to set a desired average flow rate.
+    fit_velo =  (upperPoint(c_Z)-lowerPoint(c_Z))/lengths(c_Z)
+    ! Initial time step
+    time_step=dtMax
+
+    if(rank==0) open(10,file='diagnostics') ! Output only for one process
+
+    !! Compute a first distribution of velocity and vorticity
+    call computeInitialFields(fit_velo,time_step,resolution,grid_step,coordMin)
+    call printToVTK("init",iter,velocity,vorticity,topo,grid_step)
+    force=0.0
+    call nocaForces(force,velocity,vorticity,1./Re,coordMin,grid_step,time_step,dvol)
+    if(rank==0) print*, 'Initial drag ', force
+    ! Synchronise all procs after initialization process ...
+    call MPI_BARRIER(MPI_COMM_WORLD,info)
+    write (*,'(a,i5,a)')  '[',rank,'] -------------------------------------------------------------->  start simulation'
+    resetpos = .TRUE.
+    do while(current_time <= final_time)
+       elapsed_time = MPI_Wtime()
+       ! Perturbation step, only required for high Reynolds number 
+       ! if( current_time>3.0 .and. current_time<4.0) then
+       !   call perturb(velocity, current_time)
+       ! end if
+       
+       !============================================================ 
+       ! Compute stretch/diffusion from current velocity/vorticity
+       !============================================================ 
+
+
+       print *, rank, 'vel X', velocity(1,10,10,-1:3,1)
+       print *, rank, 'vel X', velocity(1,10,10,resolution(3)-2:resolution(3)+2,1)
+       print *, rank, 'vort X', vorticity(1,10,10,-1:3,1)
+       print *, rank, 'vort X', vorticity(1,10,10,resolution(3)-2:resolution(3)+2,1)
+
+       call computeRHS(velocity,vorticity,rhs,resolution,grid_step,nu)
+       print *, rank, 'rhs pre', minval(rhs),maxval(rhs)
+       print *, rank, 'rhs preX', rhs(1,10,10,-1:3,1)
+       print *, rank, 'rhs preX', rhs(1,10,10,resolution(3)-2:resolution(3)+2,1)
+!       print *, rank, 'rhs preY', rhs(2,10,10,-1:3,1),rhs(2,10,10,resolution(3)-2:resolution(3)+2,1)
+!       print *, rank, 'rhs preZ', rhs(3,10,10,-1:3,1),rhs(3,10,10,resolution(3)-2:resolution(3)+2,1)
+       
+       call ppm_map_field_ghost_get(topo%ID,topo%mesh(1)%ID,domain_ghostsize,info)
+       call ppm_map_field_push(topo%ID,topo%mesh(1)%ID,rhs,3,info)
+       call ppm_map_field_send(info)
+       call ppm_map_field_pop(topo%ID,topo%mesh(1)%ID,rhs,3,domain_ghostsize,info)
+          
+       print *, rank, 'rhs', minval(rhs),maxval(rhs)
+       print *, rank, 'rhs postX', rhs(1,10,10,-1:3,1)
+       print *, rank, 'rhs postX', rhs(1,10,10,resolution(3)-2:resolution(3)+2,1)
+!       print *, rank, 'rhs postY', rhs(2,10,10,-1:3,1),rhs(2,10,10,resolution(3)-2:resolution(3)+2,1)
+!       print *, rank, 'rhs postZ', rhs(3,10,10,-1:3,1),rhs(3,10,10,resolution(3)-2:resolution(3)+2,1)
+
+       !============================================================ 
+       ! Solve Navier-Stokes using a particle method
+       !============================================================ 
+       ! Initialize the particles according to the last computed vorticity, velocity and rhs
+       call update_particles(vorticity,resetpos,topo%ID,topo%mesh(1)%ID,velocity)
+       ! Integrate
+       call push_particles(time_step,topo%ID,topo%mesh(1)%ID,domain_ghostsize,velocity,rhs)
+       ! Remesh
+       call remesh(topo%ID, topo%mesh(1)%ID,domain_ghostsize,vorticity)
+       ! Ghost values for vorticity
+       call ppm_map_field_ghost_get(topo%ID,topo%mesh(1)%ID,domain_ghostsize,info)
+       call ppm_map_field_push(topo%ID,topo%mesh(1)%ID,vorticity, 3, info)
+       call ppm_map_field_send(info)
+       call ppm_map_field_pop(topo%ID,topo%mesh(1)%ID,vorticity,3,domain_ghostsize,info)
+
+       !============================================================ 
+       ! Solve Poisson for the new vorticity --> velocity
+       !============================================================ 
+       ! Compute velocity from vorticity
+       ! Two steps:
+       ! - solve Poisson for the stream function and derive velocity from it; both steps
+       !   are handled inside the ppm routine, so we never manipulate stream_function directly.
+       ! - update velocity to fit with required boundary values
+       ! Solve Poisson to find velocity
+       call solve_poisson(vorticity,velocity,topo%ID,topo%mesh(1)%ID,domain_ghostsize)
+       velocity(c_X,:,:,:,:) = velocity(c_X,:,:,:,:) + fit_velo
+
+       !============================================================ 
+       ! Penalize velocity on the sphere and on the boundaries
+       !============================================================ 
+       call penalise_velocity(velocity,time_step,chi_boundary,chi_sphere)
+       call ppm_map_field_ghost_get(topo%ID,topo%mesh(1)%ID,domain_ghostsize,info)
+       call ppm_map_field_push(topo%ID,topo%mesh(1)%ID,velocity, 3, info)
+       call ppm_map_field_send(info)
+       call ppm_map_field_pop(topo%ID,topo%mesh(1)%ID,velocity,3,domain_ghostsize,info)
+       
+       !============================================================ 
+       ! Compute the new "penalized" vorticity
+       !============================================================ 
+       call curlDF4(velocity,vorticity,resolution,grid_step)
+       call ppm_map_field_ghost_get(topo%ID,topo%mesh(1)%ID,domain_ghostsize,info)
+       call ppm_map_field_push(topo%ID,topo%mesh(1)%ID,vorticity,3,info)
+       call ppm_map_field_send(info)
+       call ppm_map_field_pop(topo%ID,topo%mesh(1)%ID,vorticity,3,domain_ghostsize,info)
+
+       !============================================================ 
+       ! Compute the diagnostics, Noca method
+       !============================================================ 
+       call nocaForces(force,velocity,vorticity,1./Re,coordMin,grid_step,time_step,dvol)
+       
+       if(rank == 0) then
+          write(*,'(i5,a,3f10.5)') iter, ' drag: ', force
+          write(10,'(11e14.5)') current_time,force
+       end if
+
+       if(verbose) then 
+          memoryUsed = memoryUsed + getMemoryUsedForParticles()
+          write(*,'(a,i5,a,i10)') "[", rank,"] end of iter ",iter
+          write(*,'(a,i5,a,f10.4)')  "[", rank,"] simulation time : ", MPI_WTime()-elapsed_time
+          write(*,'(a,i5,a,f10.5)')  "[", rank,"] current time step : ", time_step
+          write(*,'(a,i5,a,f10.4)')  "[", rank,"] Memory used : ",  memoryUsed
+       end if
+       
+       ! Update time
+       current_time = current_time + time_step
+       
+       ! Compute the new time step according to vorticity maximum value
+       call updateTimeStep(time_step,vorticity,dtMax)
+       
+       ! Output every 20 time units ...
+       if(mod(current_time,20.).lt.time_step) then
+          call printToVTK("run",iter,velocity,vorticity,topo,grid_step)
+       end if
+       
+       !current_time = final_time
+       iter = iter+1
+       call MPI_BARRIER(MPI_COMM_WORLD,info)
+    enddo
+     
+    if(rank==0) close(10)
+
+  end subroutine timeLoop
+
+  subroutine updateTimeStep(dt,ref_field,dtMax)
+    
+    real(mk), dimension(:,:,:,:,:), pointer :: ref_field
+    real(mk), intent(inout) :: dt
+    real(mk),intent(in)::dtMax
+
+    real(mk) :: local_max,omega_max
+    
+    local_max = maxval(ref_field(:,:,:,:,1)) ! We ignore the ppm nsubs stuff ...
+    call MPI_Reduce(local_max,omega_max,1,MPI_DOUBLE_PRECISION,MPI_MAX,0,MPI_COMM_WORLD,info)
+    if(rank == 0) then
+       dt = min(0.25/omega_max,dtMax)
+    end if
+    call MPI_bcast(dt,1,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD,info)
+  end subroutine updateTimeStep
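+
+  ! Equivalent single-collective variant (the form used in the 2D module):
+  !   local_max = maxval(ref_field(:,:,:,:,1))
+  !   call MPI_ALLReduce(local_max,omega_max,1,MPI_DOUBLE_PRECISION,MPI_MAX,MPI_COMM_WORLD,info)
+  !   dt = min(0.25/omega_max,dtMax)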
+
+end module ppmNavierStokes3D
diff --git a/HySoP/src/parmesTools.f90 b/HySoP/src/parmesTools.f90
new file mode 100755
index 0000000000000000000000000000000000000000..e48a4e7badb9f4420dbee213854c8b10a67a1a17
--- /dev/null
+++ b/HySoP/src/parmesTools.f90
@@ -0,0 +1,20 @@
+!> Some useful tools for assertion, profiling ...
+module parmesTools
+
+  use client_data
+  implicit none
+
+  contains
+
+    subroutine parmesAssert(var,value,message)
+      integer, intent(in) :: var
+      integer, intent(in) :: value
+      character(len=*) :: message
+      
+      if(var/=value) then
+         write (*,'(a,i3,a,a)') '[',rank, "] Parmes error: ", message
+         stop 
+      end if
+    end subroutine parmesAssert
+
+end module parmesTools
diff --git a/HySoP/src/ppmInterface/CMakeLists.txt b/HySoP/src/ppmInterface/CMakeLists.txt
new file mode 100755
index 0000000000000000000000000000000000000000..158812ec4b61030239adea8df81e7201610cbcfa
--- /dev/null
+++ b/HySoP/src/ppmInterface/CMakeLists.txt
@@ -0,0 +1,21 @@
+
+find_package(PPMCore 1.2 REQUIRED)
+include_directories(${PPMCore_INCLUDE_DIRS})
+set(LIBS ${LIBS} ${PPMCore_LIBRARY})
+if(VERBOSE_MODE)
+  message(STATUS "Found PPM version ${PPMCore_VERSION}: ${PPMCore_LIBRARY}")
+  message(STATUS "PPM headers location: ${PPMCore_INCLUDE_DIRS}")
+endif(VERBOSE_MODE)
+
+# --- PPM Numerics ---
+if(WITH_PPMNumerics)
+  find_package(PPMNumerics 1.2 REQUIRED)
+  include_directories(${PPMNumerics_INCLUDE_DIRS})
+  set(LIBS ${LIBS} ${PPMNumerics_LIBRARY})
+  if(VERBOSE_MODE)
+    message(STATUS "Found PPMNumerics version ${PPMNUmerics_VERSION}: ${PPMNumerics_LIBRARY}")
+    message(STATUS "PPMNumerics header location: ${PPMNumerics_INCLUDE_DIRS}")
+  endif(VERBOSE_MODE)
+endif()
+
+
diff --git a/HySoP/src/ppmInterface/Fields.f90 b/HySoP/src/ppmInterface/Fields.f90
new file mode 100755
index 0000000000000000000000000000000000000000..31a7524274b1525743de1cb6ea8f676fc34fc71e
--- /dev/null
+++ b/HySoP/src/ppmInterface/Fields.f90
@@ -0,0 +1,143 @@
+!> Declaration, allocation of all the fields on the grid.
+module Fields
+  
+  use client_data
+  use mpi
+  use PPMFields
+
+  implicit none
+
+  !> Velocity 
+  real(mk), dimension(:,:,:,:), pointer :: velocity => NULL()
+  !> Vorticity
+  real(mk), dimension(:,:,:,:), pointer :: vorticity =>NULL()
+  !> Vorticity (a scalar in the 2D case)
+  real(mk), dimension(:,:,:), pointer :: vorticity2D =>NULL()
+  !> Stream function - Test purpose. Useless if ppm fft solver works as it is supposed to ...
+  ! real(mk), dimension(:,:,:,:,:), pointer :: stream_function =>NULL()
+  !> rhs of vorticity eq (i.e. stretch + diffusion terms)
+  real(mk), dimension(:,:,:,:), pointer :: rhs =>NULL()
+  !> Scalar work field on the grid (allocated in initFields)
+  real(mk), dimension(:,:,:), pointer :: gauss =>NULL()
+  !real(mk), dimension(:,:,:,:,:), pointer :: vel_ex => NULL()
+  !> Scalar on the grid, test purpose for chi functions
+  real(mk),dimension(:,:,:),pointer::testFunc=>NULL()
+  !> Scalar on the grid
+  real(mk),dimension(:,:,:),pointer::scalar=>NULL()
+  
+contains
+  
+  !> Fields allocation. 
+  !! Warning : ghost points must be included in the field (i.e. size = "real size" + 2*number of ghost points)
+  subroutine initFields(resolution,ghostsize)
+    
+    !> Required resolution for the fields (without ghosts)
+    integer, dimension(dime), intent(in) :: resolution
+    !> number of ghost points in each direction (ghost(c_X) = 2 means resolution(c_X)+ 4 points for the field)
+    integer, dimension(:),pointer:: ghostsize
+    
+    integer::istat
+    ! Lower and upper bounds for fields
+    integer, dimension(dime) :: ldl, ldu
+    ! nsublist from the ppm topology; assumed to be equal to 1 (see Topology.f90)
+
+    ldl = 1 - ghostsize
+    ldu = resolution + ghostsize
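+    ! Example: with resolution = (/65,65,65/) and ghostsize = (/2,2,2/), the fields
+    ! are indexed from ldl = -1 to ldu = 67 in each direction.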
+
+
+    if(dime==2) then
+       call initPPMFields2D(resolution,ghostsize)
+       velocity => PPMvelocity2D
+       vorticity2D => PPMvorticity2D
+       scalar => PPMscalar2D
+       allocate(gauss(ldl(c_X):ldu(c_X),ldl(c_Y):ldu(c_Y),1), stat = istat)
+       allocate(rhs(dime,ldl(c_X):ldu(c_X),ldl(c_Y):ldu(c_Y),1), stat = istat)
+       
+    else if(dime == 3) then
+       call initPPMFields3D(resolution,ghostsize)
+       velocity => PPMvelocity3D(:,:,:,:,1)
+       vorticity => PPMvorticity3D(:,:,:,:,1)
+       rhs => PPMrhs3D(:,:,:,:,1)
+       allocate(gauss(ldl(c_X):ldu(c_X),ldl(c_Y):ldu(c_Y),ldl(c_Z):ldu(c_Z)), stat = istat)
+    end if
+
+    if(dime == 3) then ! resolution holds only dime entries, so guard the 2D case
+       allocate(testFunc(resolution(c_X),resolution(c_Y),resolution(c_Z)))
+    else
+       allocate(testFunc(resolution(c_X),resolution(c_Y),1))
+    end if
+
+
+!!$    ! Velocity ...
+!!$    allocate(velocity(dime,ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3),nsublist),stat = istat)
+!!$    if(istat.ne.0) stop 'Field allocation error for velocity'
+!!$    ! Vorticity
+!!$    allocate(vorticity(dime,ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3),nsublist), stat = istat)
+!!$    if(istat.ne.0) stop 'Field allocation error for vorticity'
+!!$    ! rhs
+!!$    allocate(rhs(dime,ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3),nsublist), stat = istat)
+ 
+!!$    if(istat.ne.0) stop 'Field allocation error for rhs'
+!!$    
+!!$    !allocate(stream_function(dime,ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3),nsublist), stat = istat)
+!!$    !if(istat.ne.0) stop 'stream_function allocation error for rhs'
+    
+!!    allocate(testFunc(ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3)), stat = istat)
+!!    if(istat.ne.0) stop 'Field allocation error for testFunc'
+    ! Scalar
+    !allocate(scalar(ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3),nsublist), stat = istat)
+    !if(istat.ne.0) stop 'Field allocation error for scalar'
+  end subroutine initFields
+
+  !> compute the size (in MB) of the memory used to store the fields
+  !! (relies on the non-standard sizeof extension, which returns a size in bytes)
+  function getMemoryUsedForFields()
+    real(mk) :: getMemoryUsedForFields 
+    getMemoryUsedForFields = sizeof(velocity)+sizeof(vorticity)+sizeof(rhs)+sizeof(testfunc)+sizeof(scalar)
+    getMemoryUsedForFields = getMemoryUsedForFields*1.e-6
+    if(verbose) then
+       write(*,'(a,i3,a,f10.4,a)') &
+            '[',rank,'] Fields have been initialized.  Memory used :', getMemoryUsedForFields, ' MB.'
+    end if
+  end function getMemoryUsedForFields
+
+  !> Shift the velocity x component according to a required flow rate
+  !> \f[ velocity_x = velocity_x + shift \f]
+  !> with
+  !> \f[shift = \frac{reqFlowRate-currentFlowRate}{S_x} \f]
+  !> \f$S_x\f$ being the surface where x=xmin (incoming flow)
+  subroutine shiftVelocityX(reqFlowRate,step,resolution,surf,coordMin,lower)
+    !> Required flow rate
+    real(mk), intent(in) :: reqFlowRate
+    !> Grid step
+    real(mk), dimension(dime), intent(in) :: step
+    !> local resolution
+    integer,dimension(dime),intent(in) :: resolution
+    !> Area of the surface for integration 
+    real(mk) ,intent(in) :: surf
+    !> Coordinates of the minimal point of the current domain
+    real(mk),dimension(dime),intent(in) :: coordMin
+    !> lower bound of the physical domain
+    real(mk),dimension(dime),intent(in) :: lower
+
+    real(mk) :: localShift,globalShift
+    integer :: info
+    localShift = 0 
+    
+    ! We compute the current flow rate through surface x = xmin, for the x component of the velocity : 
+    ! FlowRate = sum(surf x=xmin)( velocity(c_X) ) * step(c_Z)*step(c_Y)
+    ! And use this value to shift the velocity(c_X) to have :
+    ! RequiredFlowRate = FlowRate + Surf(x=xmin)*globalShift
+ 
+    ! Step 1 : compute flowRate
+    if(abs(coordMin(c_X)-lower(c_X)) <= 2.*epsilon(globalShift) ) then
+       ! Sum the x-velocity over the face x = xmin; the flow rate is this sum
+       ! times step(c_Y)*step(c_Z). Warning: ghost points must be excluded
+       localShift = sum(velocity(c_X,1,1:resolution(c_Y)-1,1:resolution(c_Z)-1))
+    end if
+    ! Step 2 : reduction over all MPI processes ...
+    call MPI_ALLReduce(localShift,globalShift,1,MPI_DOUBLE_PRECISION,MPI_SUM,MPI_COMM_WORLD,info)
+    ! Step 3 : set global shift
+    globalShift = (reqFlowRate-globalShift*step(c_Y)*step(c_Z))/surf 
+    
+    ! Step 4 : update velocity(c_X)
+    velocity(c_X,:,:,:) = velocity(c_X,:,:,:) + globalShift
+
+  end subroutine shiftVelocityX
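+
+  ! Hedged worked example for the shift above (assumed values): with reqFlowRate = 1.0,
+  ! a summed face flow rate globalShift*step(c_Y)*step(c_Z) = 0.8 and surf = 2.0,
+  ! the correction is (1.0 - 0.8)/2.0 = 0.1, added to velocity(c_X,:,:,:).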
+
+end module Fields
diff --git a/HySoP/src/ppmInterface/PPMFields.f90 b/HySoP/src/ppmInterface/PPMFields.f90
new file mode 100755
index 0000000000000000000000000000000000000000..bb2e45920c48844bfcb8f277e28196ce9b37aac5
--- /dev/null
+++ b/HySoP/src/ppmInterface/PPMFields.f90
@@ -0,0 +1,80 @@
+!> Declaration, allocation of all the fields on the grid.
+module PPMFields
+  
+  use client_data
+  use client_topology, only: nsublist
+
+  implicit none
+
+  !> Velocity 3D (PPM-style storage) 
+  real(mk), dimension(:,:,:,:,:), pointer :: PPMvelocity3D => NULL()
+  !> Velocity 2D (PPM-style storage)
+  real(mk), dimension(:,:,:,:), pointer :: PPMvelocity2D => NULL()
+  !> Vorticity 3D (PPM-style storage) 
+  real(mk), dimension(:,:,:,:,:), pointer :: PPMvorticity3D => NULL()
+  !> Vorticity 2D (PPM-style storage)
+  real(mk), dimension(:,:,:), pointer :: PPMvorticity2D => NULL()
+  !> RHS 3D (PPM-style storage) 
+  real(mk), dimension(:,:,:,:,:), pointer :: PPMrhs3D => NULL()
+  !> A Scalar on a 3D grid (PPM-style storage) 
+  real(mk), dimension(:,:,:,:), pointer :: PPMscalar3D => NULL()
+  !> A Scalar on a 2D grid (PPM-style storage) 
+  real(mk), dimension(:,:,:), pointer :: PPMscalar2D => NULL()
+  
+contains
+  
+  !> Fields allocation. 
+  !! Warning : ghost points must be included in the field (i.e. size = "real size" + 2*number of ghost points)
+  subroutine initPPMFields3D(resolution,ghostsize)
+    
+    !> Required resolution for the fields (without ghosts)
+    integer, dimension(dime), intent(in) :: resolution
+    !> number of ghost points in each direction (ghost(c_X) = 2 means resolution(c_X)+ 4 points for the field)
+    integer, dimension(:),pointer:: ghostsize
+    
+    integer::istat
+    ! Lower and upper bounds for fields
+    integer, dimension(dime) :: ldl, ldu
+    ! nsublist from the ppm topology; assumed to be equal to 1 (see Topology.f90)
+    
+    ldl = 1 - ghostsize
+    ldu = resolution + ghostsize
+    ! Velocity ...
+    allocate(PPMvelocity3D(dime,ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3),nsublist),stat = istat)
+    if(istat.ne.0) stop 'Field allocation error for velocity (PPM)'
+    ! Vorticity
+    allocate(PPMvorticity3D(dime,ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3),nsublist), stat = istat)
+    if(istat.ne.0) stop 'Field allocation error for vorticity (PPM)'
+    ! rhs
+    allocate(PPMrhs3D(dime,ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3),nsublist), stat = istat)
+    if(istat.ne.0) stop 'Field allocation error for rhs (PPM)'
+
+  end subroutine initPPMFields3D
+
+  subroutine initPPMFields2D(resolution,ghostsize)
+    
+    !> Required resolution for the fields (without ghosts)
+    integer, dimension(dime), intent(in) :: resolution
+    !> number of ghost points in each direction (ghost(c_X) = 2 means resolution(c_X)+ 4 points for the field)
+    integer, dimension(:),pointer:: ghostsize
+    
+    integer::istat
+    ! Lower and upper bounds for fields
+    integer, dimension(dime) :: ldl, ldu
+    ! nsublist from the ppm topology; assumed to be equal to 1 (see Topology.f90)
+    
+    ldl = 1 - ghostsize
+    ldu = resolution + ghostsize
+    ! Velocity ...
+    allocate(PPMvelocity2D(dime,ldl(1):ldu(1),ldl(2):ldu(2),1),stat = istat)
+    if(istat.ne.0) stop 'Field allocation error for velocity (PPM)'
+    ! Vorticity (a scalar in 2D)
+    allocate(PPMvorticity2D(ldl(1):ldu(1),ldl(2):ldu(2),1), stat = istat)
+    if(istat.ne.0) stop 'Field allocation error for vorticity (PPM)'
+    ! Scalar
+    allocate(PPMscalar2D(ldl(1):ldu(1),ldl(2):ldu(2),1), stat = istat)
+    if(istat.ne.0) stop 'Field allocation error for scalar (PPM)'
+
+  end subroutine initPPMFields2D
+
+end module PPMFields
diff --git a/HySoP/src/ppmInterface/Particles.f90 b/HySoP/src/ppmInterface/Particles.f90
new file mode 100755
index 0000000000000000000000000000000000000000..1b7b12b5fffc5b3a28eb5736e19076196b815ef3
--- /dev/null
+++ b/HySoP/src/ppmInterface/Particles.f90
@@ -0,0 +1,1445 @@
+!> Functions dealing with particles :
+!> - initialisation, creation
+!> - remesh and interpolation
+!> - integration (push) 
+module Particles
+  
+  use ppm_module_rmsh, only : ppm_rmsh_create_part,ppm_interp_m2p, ppm_interp_p2m !, ppm_rmsh_remesh
+  use ppm_module_map_field_ghost, only:ppm_map_field_ghost_get!, ppm_map_field_ghost_put
+  use ppm_module_map_field, only : ppm_map_field_push,ppm_map_field_send,ppm_map_field_pop
+  use ppm_module_impose_part_bc
+  use client_data
+  use ppm_module_data, only : ppm_param_rmsh_kernel_mp4
+  use ppm_module_map_part
+  use ppm_module_util_dbg
+
+  implicit none
+
+  private
+
+  public initNSSolver_particles,getMemoryUsedForParticles,ScalarSolver_particles,init_parts,&
+       countAndCreateParticles, countAndUpdateParticles,PPMupdateParticles3D, PPMupdateParticles2D,&
+       freeParticles,createParticlesEverywhere,npart,RK4_2D,Ppmremesh2d,RK2_2D,PPMupdateParticles2DScalar,&
+       RK2_2DScalar,PPMremesh2DScalar,remesh2D,createParticlesEverywhereScalar,RK4_2Dscalar
+
+  !> cutoff values (threshold for vorticity values for which particles are created)
+  real(mk), dimension(2) :: cutoff
+  !> Current number of particles
+  integer :: npart
+  !> Particles positions
+  real(mk), dimension(:,:), pointer :: xp=>NULL()
+  !> Particle strength (i.e. the variable carried by each particle; here, vorticity)
+  real(mk), dimension(:,:), pointer :: omp=>NULL()
+  !> Particle velocities
+  real(mk), dimension(:,:), pointer :: velop=>NULL()
+  !> Particles RHS term
+  real(mk), dimension(:,:), pointer :: rhsp => NULL()
+  !> Particles scalar term
+  real(mk), dimension(:), pointer :: scalar_p => NULL()
+  !> Backup vector for RK schemes
+  real(mk), dimension(:,:), pointer :: buffer=>NULL(),buffer2=>NULL(),buffer3=>NULL()
+  
+  !> Size of buffer, i.e. roughly the number of particles at the previous time step
+  integer :: buffer_size
+  !> Kernel for remesh ...
+  integer :: kernel
+
+contains
+
+  subroutine ScalarSolver_particles(scalar,velocity,dt,topoid,meshid,ghostsize,resetpos,step,coordMin,resolution)
+    
+    !> Vorticity field, on the grid
+    real(mk),dimension(:,:,:),pointer :: scalar
+    !> Velocity field, on the grid
+    real(mk),dimension(:,:,:,:),pointer :: velocity
+    !> Current time step
+    real(mk),intent(in) :: dt
+    !> Current topo id
+    integer, intent(in) :: topoid
+    !> Current mesh id
+    integer, intent(in) :: meshid
+    !> Number of ghost points
+    integer,  dimension(:),pointer:: ghostsize 
+    !> bool to reset (or not) particles positions
+    logical, intent(in) :: resetpos
+    real(mk),dimension(dim3),intent(in) :: coordMin,step
+    integer, dimension(dim3),intent(in) :: resolution
+    !> Splitting direction index
+    integer :: dir
+    
+    do dir=1,dime
+       !call ppm_rmsh_create_part(topoid,meshid,xp,npart,scalar_p,scalar,cutoff,info,resetpos,&
+       !  field_wp=velocity,wp=velop,lda2=dime)
+       call reset_parts(scalar,resolution,coordMin,step)
+       write(*,'(a,i5,a,i8,a)') '[',rank,'] initialisation with ', npart,' particles'
+       ! Integrate
+       call push_split_particles(dir,dt,topoid,topoid,ghostsize,velocity,coordMin,step)
+       ! Remesh
+       call remesh_split_mp6(scalar,dir,step,coordMin)
+          
+       if(dir == c_X) then
+          scalar(-ghostsize(c_X)+1:ghostsize(c_X)+1,:,:) = scalar(-ghostsize(c_X)+1:ghostsize(c_X)+1,:,:) &
+               + scalar(resolution(c_X)-ghostsize(c_X):resolution(c_X)+ghostsize(c_X),:,:)
+          scalar(resolution(c_X)-ghostsize(c_X):resolution(c_X)+ghostsize(c_X),:,:) = scalar(-ghostsize(c_X)&
+               +1:ghostsize(c_X)+1,:,:)
+       else if(dir == c_Y) then 
+          scalar(:,-ghostsize(c_Y)+1:ghostsize(c_Y)+1,:) = scalar(:,-ghostsize(c_Y)+1:ghostsize(c_Y)+1,:) &
+               + scalar(:,resolution(c_Y)-ghostsize(c_Y):resolution(c_Y)+ghostsize(c_Y),:)
+          scalar(:,resolution(c_Y)-ghostsize(c_Y):resolution(c_Y)+ghostsize(c_Y),:) = scalar(:,-ghostsize(c_Y)&
+               +1:ghostsize(c_Y)+1,:)
+       else
+          scalar(:,:,-ghostsize(c_Z)+1:ghostsize(c_Z)+1) = scalar(:,:,-ghostsize(c_Z)+1:ghostsize(c_Z)+1) &
+               + scalar(:,:,resolution(c_Z)-ghostsize(c_Z):resolution(c_Z)+ghostsize(c_Z))
+          scalar(:,:,resolution(c_Z)-ghostsize(c_Z):resolution(c_Z)+ghostsize(c_Z)) = scalar(:,:,-ghostsize(c_Z)&
+               +1:ghostsize(c_Z)+1)
+       end if
+    end do
+    ! Ghost values for vorticity
+!!$    call ppm_map_field_ghost_get(topoid,meshid,ghostsize,info)
+!!$    call ppm_map_field_push(topoid,meshid,scalar,info)
+!!$    call ppm_map_field_send(info)
+!!$    call ppm_map_field_pop(topoid,meshid,scalar,ghostsize,info)
+    
+  end subroutine ScalarSolver_particles
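+
+  ! The wrap-around block above enforces periodicity after each one-dimensional remesh:
+  ! with local size N = resolution(dir) and g = ghostsize(dir), the two periodically
+  ! overlapping ranges (-g+1:g+1 and N-g:N+g, 2g+1 cells each) are first accumulated
+  ! into the lower copy, which is then mirrored back onto the upper one.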
+
+  subroutine init_parts(scalar,velocity,resolution,coordMin,step)
+    real(mk),dimension(:,:,:), pointer :: scalar
+    real(mk),dimension(:,:,:,:),pointer :: velocity
+    real(mk),dimension(dim3),intent(in)::coordMin,step
+    integer, dimension(dim3),intent(in)::resolution
+
+    integer :: i,j,k,current_part
+    real(mk),dimension(dim3) :: coord
+    
+    current_part = 1
+    npart = product(resolution-1)
+    allocate(xp(dime,npart),velop(dime,npart),scalar_p(npart),buffer(dime,npart))
+    do k=1,resolution(c_Z)-1
+       coord(c_Z) = coordMin(c_Z) + (k-1)*step(c_Z)
+       do j=1,resolution(c_Y)-1
+          coord(c_Y) = coordMin(c_Y) + (j-1)*step(c_Y)
+          do i=1,resolution(c_X)-1
+             coord(c_X) = coordMin(c_X) + (i-1)*step(c_X)
+             xp(:,current_part) = coord(:)
+             velop(:,current_part) = velocity(:,i,j,k)
+             scalar_p(current_part) = scalar(i,j,k)
+             current_part = current_part + 1
+
+          end do
+       end do
+    end do
+
+    
+  end subroutine init_parts
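+
+  ! Layout example (assumed local resolution 65x65x65): particles sit on every grid
+  ! point except the last plane in each direction, so npart = 64**3 = 262144, and
+  ! particle (i,j,k) gets the linear index i + (j-1)*64 + (k-1)*64**2.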
+
+  subroutine reset_parts(scalar,resolution,coordMin,step)
+    
+    real(mk),dimension(:,:,:), pointer :: scalar
+    real(mk),dimension(dim3),intent(in)::coordMin,step
+    integer, dimension(dim3),intent(in)::resolution
+
+    integer :: i,j,k,current_part
+    real(mk),dimension(dim3) :: coord
+    
+    current_part = 1
+    do k=1,resolution(c_Z)-1
+       coord(c_Z) = coordMin(c_Z) + (k-1)*step(c_Z)
+       do j=1,resolution(c_Y)-1
+          coord(c_Y) = coordMin(c_Y) + (j-1)*step(c_Y)
+          do i=1,resolution(c_X)-1
+             coord(c_X) = coordMin(c_X) + (i-1)*step(c_X)
+             xp(:,current_part) = coord(:)
+             scalar_p(current_part) = scalar(i,j,k)
+             current_part = current_part + 1
+          end do
+       end do
+    end do
+
+  end subroutine reset_parts
+
+
+  !> Set required parameters for particles creations
+  subroutine initNSSolver_particles()
+    
+    ! Cutoff: lower and upper bounds; a particle is created wherever the value of
+    ! the reference field lies between cutoff(1) and cutoff(2).
+    cutoff(1) = 1.d-8
+    cutoff(2) = 1.e9
+    kernel = ppm_param_rmsh_kernel_mp4
+    npart = 0
+    buffer_size = npart
+  end subroutine initNSSolver_particles
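+
+  ! Illustration of the bounds above: a grid value of 1.d-12 creates no particle
+  ! (below cutoff(1) = 1.d-8), a value of 0.1 does; the upper bound 1.e9 is wide
+  ! enough to act as "no upper threshold" in practice.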
+
+  
+  
+  !> PPM-based creation/update of particles distribution
+  subroutine PPMupdateParticles3D(field_on_grid,resetpos,topoid,meshid,vel)
+    !> The field used to create particles (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:,:), pointer :: field_on_grid
+    !>  true to reset distribution else false
+    logical, intent(in) :: resetpos
+    !> topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer :: info
+    !> velocity on grid (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:,:), pointer :: vel
+    info = 0
+
+    ! -------->  The following call will allocate memory for xp, omp and velop
+    ! -------->  And deallocation will be handled by ppm. (?)
+    ! -------->  Other variables carried by particles but not allocated during ppm_rmsh_create call
+    ! -------->  is there a better way to do this, with map_push or other ppm routines? No according to Omar. 
+    ! Call ppm func to create the particles distribution
+    call ppm_rmsh_create_part(topoid,meshid,xp,npart,omp,dime,field_on_grid,cutoff,info,resetpos,&
+         field_wp=vel,wp=velop,lda2=dime)
+    write(*,'(a,i5,a,i8,a)') '[',rank,'] initialisation with ', npart,' particles'
+
+    !if(associated(xp)) print *, "xp shape:", rank, " ", shape(xp), npart
+    ! -------->  Ok, from here all vars carried by particles must have the following shape : dime,npart
+    !> -------->  mesh to particles for velocity  => done in ppm_rmsh_create_part 
+    !    call ppm_interp_m2p(topoid, meshid, xp, npart, velop, dime, kernel, ghostsize, vel,info)
+    !    print *, 'End of update particles'
+
+  end subroutine PPMupdateParticles3D
+
+  subroutine PPMupdateParticles2DScalar(field_on_grid,resetpos,topoid,meshid,vel)
+    !> The field used to create particles (must be in PPM-style storage)
+    real(mk), dimension(:,:,:), pointer :: field_on_grid
+    !>  true to reset distribution else false
+    logical, intent(in) :: resetpos
+    !> topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer :: info,i
+    !> velocity on grid (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:), pointer :: vel
+    info = 0
+
+    ! -------->  The following call will allocate memory for xp, omp and velop
+    ! -------->  And deallocation will be handled by ppm. (?)
+    ! -------->  Other variables carried by particles but not allocated during ppm_rmsh_create call
+    ! -------->  is there a better way to do this, with map_push or other ppm routines? No according to Omar. 
+    ! Call ppm func to create the particles distribution
+    ! (note: the resetpos argument is ignored here; positions are always reset)
+    call ppm_rmsh_create_part(topoid,meshid,xp,npart,scalar_p,field_on_grid,cutoff,info,.true.,&
+         field_wp=vel,wp=velop,lda2=dime)
+    write(*,'(a,i5,a,i8,a)') '[',rank,'] initialisation with ', npart,' particles'
+ 
+    open(45,file="omp") ! Output only for one process
+    do i = 1,npart
+       write(45,'(2e14.5)') xp(c_X,i),scalar_p(i)
+    end do
+    close(45)
+
+
+   !if(associated(xp)) print *, "xp shape:", rank, " ", shape(xp), npart
+    ! -------->  Ok, from here all vars carried by particles must have the following shape : dime,npart
+    !> -------->  mesh to particles for velocity  => done in ppm_rmsh_create_part 
+    !    call ppm_interp_m2p(topoid, meshid, xp, npart, velop, dime, kernel, ghostsize, vel,info)
+    !    print *, 'End of update particles'
+
+  end subroutine PPMupdateParticles2DScalar
+
+  subroutine PPMupdateParticles2D(field_on_grid,resetpos,topoid,meshid,vel)
+    !> The field used to create particles (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:), pointer :: field_on_grid
+    !>  true to reset distribution else false
+    logical, intent(in) :: resetpos
+    !> topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer :: info
+    !> velocity on grid (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:), pointer :: vel
+    info = 0
+
+    ! -------->  The following call will allocate memory for xp, omp and velop
+    ! -------->  And deallocation will be handled by ppm. (?)
+    ! -------->  Other variables carried by particles but not allocated during ppm_rmsh_create call
+    ! -------->  is there a better way to do this, with map_push or other ppm routines? No according to Omar. 
+    ! Call ppm func to create the particles distribution
+    call ppm_rmsh_create_part(topoid,meshid,xp,npart,omp,dime,field_on_grid,cutoff,info,resetpos,&
+         field_wp=vel,wp=velop,lda2=dime)
+    write(*,'(a,i5,a,i8,a)') '[',rank,'] initialisation with ', npart,' particles'
+
+    !if(associated(xp)) print *, "xp shape:", rank, " ", shape(xp), npart
+    ! -------->  Ok, from here all vars carried by particles must have the following shape : dime,npart
+    !> -------->  mesh to particles for velocity  => done in ppm_rmsh_create_part 
+    !    call ppm_interp_m2p(topoid, meshid, xp, npart, velop, dime, kernel, ghostsize, vel,info)
+    !    print *, 'End of update particles'
+
+  end subroutine PPMupdateParticles2D
+
+  !> time integration
+  !> Advection of particles, more or less a copy of Adrien's code.
+  subroutine push_particles(dt,topoid,meshid,ghostsize,vel,rhs)
+    !> time step
+    real(mk), intent(in) ::dt
+    ! topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer,  dimension(:),pointer:: ghostsize
+    ! velocity on grid (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:,:), pointer :: vel
+     ! Secondary field to be mapped to particles (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:,:), pointer :: rhs
+    
+    ! Compute new particles positions ...
+    ! Integrate ... ===> update omp
+    ! Todo: switch between the different methods according to a user-defined variable? Later ...
+    ! call runge_kutta_2(dt,topoid,meshid,ghostsize,vel,rhs)
+    call RK4_3D(dt,topoid,meshid,ghostsize,vel,rhs)
+  end subroutine push_particles
+  
+  subroutine PPMremesh3D(topoid,meshid,ghostsize,field)
+    ! topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer,  dimension(:),pointer :: ghostsize
+    ! vorticity on grid, (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:,:), pointer :: field
+    
+    integer :: info
+    info = -1
+    !call ppm_dbg_print_d(topoid,ghostlayer,1,1,info,xp,npart)
+    call ppm_interp_p2m(topoid,meshid,xp,npart,omp,dime,kernel,ghostsize,field,info)
+    if(info.ne.0) then
+       stop 'Particles: ppm remesh error '
+    end if
+  end subroutine PPMremesh3D
+  
+  subroutine PPMremesh2D(topoid,meshid,ghostsize,field)
+    ! topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer,  dimension(:),pointer :: ghostsize
+    ! vorticity on grid, (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:), pointer :: field
+    
+    integer :: info
+    info = -1
+    !call ppm_dbg_print_d(topoid,ghostlayer,1,1,info,xp,npart)
+    call ppm_interp_p2m(topoid,meshid,xp,npart,omp,dime,kernel,ghostsize,field,info)
+    if(info.ne.0) then
+       stop 'Particles: ppm remesh error '
+    end if
+  end subroutine PPMremesh2D
+
+  subroutine PPMremesh2DScalar(topoid,meshid,ghostsize,field)
+    ! topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer,  dimension(:),pointer :: ghostsize
+    ! vorticity on grid, (must be in PPM-style storage)
+    real(mk), dimension(:,:,:), pointer :: field
+    
+    integer :: info
+    info = -1
+    !call ppm_dbg_print_d(topoid,ghostlayer,1,1,info,xp,npart)
+    call ppm_interp_p2m(topoid,meshid,xp,npart,scalar_p,kernel,ghostsize,field,info)
+    if(info.ne.0) then
+       stop 'Particles: ppm remesh error '
+    end if
+  end subroutine PPMremesh2DScalar
+  
+  !> Runge-Kutta 2 for positions and first-order (explicit Euler) update for vorticity
+  subroutine RK2_3D(dt,topoid,meshid,ghostsize,vel,rhs)
+    !> time step
+    real(mk), intent(in) ::dt
+    ! topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer,  dimension(:),pointer:: ghostsize
+    ! velocity on grid (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:,:), pointer :: vel
+     ! Secondary field to be mapped to particles (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:,:), pointer :: rhs
+    
+    !> local loop indices
+    integer :: i,newNpart,k
+    integer :: info
+    
+    ! buffer must be the same size as xp, so if the number of particles has increased
+    ! since the previous step, we reallocate the secondary fields.
+    ! Note Franck : either we allocate buffer at each time step, or we reallocate only
+    ! when buffer_size increases. Since memory is our main concern, we
+    ! allocate/deallocate at each time step, for the moment.
+    !    if(buffer_size.lt.npart) then
+    !      deallocate(buffer) 
+    allocate(buffer(dime,npart))
+    buffer_size = npart
+
+    do i=1,npart
+       do k=1,dime
+          buffer(k,i)=xp(k,i)
+          xp(k,i)=buffer(k,i)+0.5*dt*velop(k,i)
+       end do
+    end do
+    ! Particles positions have changed ... we must map between domains
+    !    call ppm_impose_part_bc(topoid,xp,npart,info) ! Useless according to doc, required according to Omar ...
+    newNpart = 0
+    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    !call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    ! Update sizes ...
+    npart = newNpart
+    !    if(size(rhsp,2).lt.npart) then
+    !      deallocate(rhsp) 
+    allocate(rhsp(dime,npart))
+    !  end if
+!!    print *, 'max min omp start', maxval(omp),minval(omp),maxval(rhsp),minval(rhsp)
+
+    !! Mesh to particles for the new particles positions ...
+    !> for velocity
+    call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+    !> for rhs of vorticity eq.
+    call ppm_interp_m2p(topoid,meshid,xp,npart,rhsp,dime,kernel,ghostsize,rhs,info)
+    ! Update positions according to the new velocity and vorticity with new rhs
+    
+    do i=1,npart
+       do k=1,dime
+          xp(k,i)=buffer(k,i)+dt*velop(k,i)
+          omp(k,i)=omp(k,i)+dt*rhsp(k,i)
+       end do
+    end do
+!!    print *, 'max min omp stop', maxval(omp),minval(omp), maxval(rhsp),minval(rhsp)
+
+    ! Free memory as soon as possible ...
+    deallocate(buffer,rhsp)
+    ! Vorticity mapping ...
+    newNpart = 0
+
+    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    !call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    npart = newNpart
+
+  end subroutine RK2_3D
+
+  !> Runge Kutta 4
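+  !! Classical RK4 sketch, as implemented below: buffer stores x^n, buffer2
+  !! accumulates the velocity stages k1 + 2 k2 + 2 k3 (buffer3 does the same
+  !! for the vorticity rhs), and the last update reads
+  !!   x^{n+1} = x^n + (dt/6) (k1 + 2 k2 + 2 k3 + k4)
+  !! with k4 the velocity interpolated at the third predicted positions.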
+  subroutine RK4_3D(dt,topoid,meshid,ghostsize,vel,rhs)
+    !> time step
+    real(mk), intent(in) ::dt
+    ! topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer,  dimension(:),pointer::ghostsize
+    ! velocity on grid (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:,:), pointer :: vel
+     ! Secondary field to be mapped to particles (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:,:), pointer :: rhs
+    
+    !> local loop indices
+    integer :: i, newNpart,k
+    real(mk) :: alpha
+    !> error status
+    integer :: info
+    ! buffer stores the initial positions, buffer2 and buffer3 accumulate the
+    ! velocity and rhs stage contributions; each is of size (dime,npart)
+
+    allocate(buffer(dime,npart),buffer2(dime,npart),buffer3(dime,npart),rhsp(dime,npart))
+    buffer_size=npart
+    
+    !> Compute current rhs on particles
+    call ppm_interp_m2p(topoid,meshid,xp,npart,rhsp,dime,kernel,ghostsize,rhs,info)
+    ! First RK stage
+    ! Velocity is up to date, following the call to update_particles
+    alpha=0.5*dt
+    do i=1,npart
+       do k=1,dime
+          buffer(k,i)=xp(k,i)
+          xp(k,i)=buffer(k,i)+alpha*velop(k,i)
+          buffer2(k,i)=velop(k,i)
+          buffer3(k,i)=rhsp(k,i)
+       end do
+    end do
+    ! Particles positions have changed ... we must map between domains
+    !    call ppm_impose_part_bc(topoid,xp,npart,info) ! Useless according to doc, required according to Omar ...
+    newNpart = 0
+    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    !call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_push(buffer2,dime,npart,info)    
+    call ppm_map_part_push(buffer3,dime,npart,info)    
+    call ppm_map_part_push(rhsp,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(rhsp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer3,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer2,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    ! Update sizes ...
+    npart = newNpart
+ 
+    !! Mesh to particles for the new particles positions ...
+    !> for velocity and rhs
+    call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+    call ppm_interp_m2p(topoid,meshid,xp,npart,rhsp,dime,kernel,ghostsize,rhs,info)
+
+    !! Second RK4 stage, with the updated velocity
+    do i=1,npart
+       do k=1,dime
+          xp(k,i)=buffer(k,i)+alpha*velop(k,i)
+          buffer2(k,i)=buffer2(k,i)+2.*velop(k,i)
+          buffer3(k,i)=buffer3(k,i)+2.*rhsp(k,i)
+       end do
+    end do
+    newNpart = 0
+    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    !call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_push(buffer2,dime,npart,info)    
+    call ppm_map_part_push(buffer3,dime,npart,info)    
+    call ppm_map_part_push(rhsp,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(rhsp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer3,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer2,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    ! Update sizes ...
+    npart = newNpart
+
+    !! Mesh to particles for the new particles positions ...
+    !> for velocity and rhs
+    call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+    call ppm_interp_m2p(topoid,meshid,xp,npart,rhsp,dime,kernel,ghostsize,rhs,info)
+
+    !! Third RK4 stage
+    alpha=dt
+    do i=1,npart
+       do k=1,dime
+          xp(k,i)=buffer(k,i)+alpha*velop(k,i)
+          buffer2(k,i)=buffer2(k,i)+2.*velop(k,i)
+          buffer3(k,i)=buffer3(k,i)+2.*rhsp(k,i)
+       end do
+    end do
+    newNpart = 0
+    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    !call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_push(buffer2,dime,npart,info)    
+    call ppm_map_part_push(buffer3,dime,npart,info)    
+    call ppm_map_part_push(rhsp,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(rhsp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer3,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer2,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    ! Update sizes ...
+    npart = newNpart
+
+    !! Mesh to particles for the new particles positions ...
+    !> for velocity and rhs
+    call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+    call ppm_interp_m2p(topoid,meshid,xp,npart,rhsp,dime,kernel,ghostsize,rhs,info)
+    
+    !! Last RK4 stage
+    alpha=dt/6.
+    do i=1,npart
+       do k=1,dime
+          xp(k,i)=buffer(k,i)+alpha*buffer2(k,i)+alpha*velop(k,i)
+          omp(k,i)=omp(k,i)+alpha*buffer3(k,i)+alpha*rhsp(k,i)
+       end do
+    end do
+    
+    ! Free memory as soon as possible ...
+    deallocate(buffer,buffer2,buffer3,rhsp)
+    ! Vorticity mapping ...
+    newNpart = 0
+    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    !call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    npart = newNpart
+    
+  end subroutine RK4_3D
+
+  !> Runge-Kutta 4 for positions, for 2D domains (i.e. rhs == 0).
+  subroutine RK4_2D(dt,topoid,meshid,ghostsize,vel)
+    !> time step
+    real(mk), intent(in) ::dt
+    ! topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer,  dimension(:),pointer::ghostsize
+    ! velocity on grid (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:), pointer :: vel
+    
+    !> local loop indices
+    integer :: i, newNpart,k
+    real(mk) :: alpha
+    !> error status
+    integer :: info
+    ! buffer stores the initial positions and buffer2 accumulates the velocity
+    ! stage contributions; each is of size (dime,npart)
+
+    allocate(buffer(dime,npart),buffer2(dime,npart))
+    buffer_size=npart
+   
+    ! First RK stage
+    ! Velocity is up to date, following the call to update_particles
+    alpha=0.5*dt
+    do i=1,npart
+       do k=1,dime
+          buffer(k,i)=xp(k,i)
+          xp(k,i)=buffer(k,i)+alpha*velop(k,i)
+          buffer2(k,i)=velop(k,i)
+       end do
+    end do
+    ! Particles positions have changed ... we must map between domains
+    call ppm_impose_part_bc(topoid,xp,npart,info) ! Useless according to doc, required according to Omar ...
+    newNpart = 0
+    !call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_push(buffer2,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(buffer2,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    ! Update sizes ...
+    print *, "NEW", npart,newNpart
+    npart = newNpart
+
+ 
+    !! Mesh to particles for the new particles positions ...
+    !> for velocity and rhs
+    call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+
+    !! Second RK4 stage, with the updated velocity
+    do i=1,npart
+       do k=1,dime
+          xp(k,i)=buffer(k,i)+alpha*velop(k,i)
+          buffer2(k,i)=buffer2(k,i)+2.*velop(k,i)
+       end do
+    end do
+    call ppm_impose_part_bc(topoid,xp,npart,info) 
+    newNpart = 0
+    !call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_push(buffer2,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(buffer2,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    print *, "NEW2", npart,newNpart
+    ! Update sizes ...
+    npart = newNpart
+
+    !! Mesh to particles for the new particles positions ...
+    !> for velocity and rhs
+    call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+
+    !! Third RK4 stage
+    alpha=dt
+    do i=1,npart
+       do k=1,dime
+          xp(k,i)=buffer(k,i)+alpha*velop(k,i)
+          buffer2(k,i)=buffer2(k,i)+2.*velop(k,i)
+       end do
+    end do
+    newNpart = 0
+    call ppm_impose_part_bc(topoid,xp,npart,info) 
+    !call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_push(buffer2,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(buffer2,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    print *, "NEW3", npart,newNpart
+    ! Update sizes ...
+    npart = newNpart
+
+    !! Mesh to particles for the new particles positions ...
+    !> for velocity and rhs
+    call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+
+    !! Last RK4 stage
+    alpha=dt/6.
+    do i=1,npart
+       do k=1,dime
+          xp(k,i)=buffer(k,i)+alpha*buffer2(k,i)+alpha*velop(k,i)
+       end do
+    end do
+    
+    ! Free memory as soon as possible ...
+    deallocate(buffer,buffer2)
+    ! Vorticity mapping ...
+    call ppm_impose_part_bc(topoid,xp,npart,info) 
+    newNpart = 0
+    !call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    print *, "NEW4", npart,newNpart
+    npart = newNpart
+    
+  end subroutine RK4_2D
+  
+  !> Runge-Kutta 4 for positions, scalar-transport version, for 2D domains (i.e. rhs == 0).
+  subroutine RK4_2DScalar(dt,topoid,meshid,ghostsize,vel)
+    !> time step
+    real(mk), intent(in) ::dt
+    ! topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer,  dimension(:),pointer::ghostsize
+    ! velocity on grid (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:), pointer :: vel
+    
+    !> local loop indices
+    integer :: i, newNpart,k
+    real(mk) :: alpha
+    !> error status
+    integer :: info
+    ! buffer stores the initial positions and buffer2 accumulates the velocity
+    ! stage contributions; each is of size (dime,npart)
+
+    allocate(buffer(dime,npart),buffer2(dime,npart))
+    buffer_size=npart
+   
+    ! First RK stage
+    ! Velocity is up to date, following the call to update_particles
+    alpha=0.5*dt
+    do i=1,npart
+       do k=1,dime
+          buffer(k,i)=xp(k,i)
+          xp(k,i)=buffer(k,i)+alpha*velop(k,i)
+          buffer2(k,i)=velop(k,i)
+       end do
+    end do
+    ! Particles positions have changed ... we must map between domains
+    !call ppm_impose_part_bc(topoid,xp,npart,info) ! Useless according to doc, required according to Omar ...
+    newNpart = 0
+    !call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(scalar_p,npart,info)    ! scalar
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_push(buffer2,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(buffer2,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(scalar_p,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    ! Update sizes ...
+    npart = newNpart
+
+    !! Mesh to particles for the new particles positions ...
+    !> for velocity and rhs
+    call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+    !! Second RK4 stage, with the updated velocity
+    do i=1,npart
+       do k=1,dime
+          xp(k,i)=buffer(k,i)+alpha*velop(k,i)
+          buffer2(k,i)=buffer2(k,i)+2.*velop(k,i)
+       end do
+    end do
+!    call ppm_impose_part_bc(topoid,xp,npart,info) 
+    newNpart = 0
+    !call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(scalar_p,npart,info)    ! scalar
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_push(buffer2,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(buffer2,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(scalar_p,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    ! Update sizes ...
+    npart = newNpart
+
+    !! Mesh to particles for the new particles positions ...
+    !> for velocity and rhs
+    call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+
+    !! Third RK4 stage
+    alpha=dt
+    do i=1,npart
+       do k=1,dime
+          xp(k,i)=buffer(k,i)+alpha*velop(k,i)
+          buffer2(k,i)=buffer2(k,i)+2.*velop(k,i)
+       end do
+    end do
+    newNpart = 0
+    !call ppm_impose_part_bc(topoid,xp,npart,info) 
+    !call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(scalar_p,npart,info)    ! scalar
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_push(buffer2,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(buffer2,dime,npart,newNpart,info)
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(scalar_p,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    ! Update sizes ...
+    npart = newNpart
+
+    !! Mesh to particles for the new particles positions ...
+    !> for velocity and rhs
+    call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+
+    !! Last RK4 stage
+    alpha=dt/6.
+    do i=1,npart
+       do k=1,dime
+          xp(k,i)=buffer(k,i)+alpha*buffer2(k,i)+alpha*velop(k,i)
+       end do
+    end do
+    
+    ! Scalar mapping ...
+    !call ppm_impose_part_bc(topoid,xp,npart,info) 
+    newNpart = 0
+    !call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(scalar_p,npart,info)    ! scalar
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(scalar_p,npart,newNpart,info)
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    npart = newNpart
+    ! Free memory as soon as possible ...
+    deallocate(buffer,buffer2)
+
+  end subroutine RK4_2DScalar
+
+  !> Runge-Kutta 2 for positions, for 2D domains (i.e. rhs == 0).
+  subroutine RK2_2D(dt,topoid,meshid,ghostsize,vel)
+    !> time step
+    real(mk), intent(in) ::dt
+    ! topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer,  dimension(:),pointer::ghostsize
+    ! velocity on grid (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:), pointer :: vel
+    
+    !> local loop indices
+    integer :: i, newNpart,k
+    real(mk) :: alpha
+    !> error status
+    integer :: info
+    ! buffer stores the initial positions; it is of size (dime,npart)
+
+    allocate(buffer(dime,npart))
+    buffer_size=npart
+   
+    ! First RK stage
+    ! Velocity is up to date, following the call to update_particles
+    alpha=0.5*dt
+    do i=1,npart
+       do k=1,dime
+          buffer(k,i)=xp(k,i)
+          xp(k,i)=buffer(k,i)+alpha*velop(k,i)
+       end do
+    end do
+    ! Particles positions have changed ... we must map between domains
+  !  call ppm_impose_part_bc(topoid,xp,npart,info) ! Useless according to doc, required according to Omar ...
+    newNpart = 0
+    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    !call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    ! Update sizes ...
+    npart = newNpart
+
+    !! Mesh to particles for the new particles positions ...
+    !> for velocity and rhs
+    call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+
+    !! Second RK2 stage, with the updated velocity
+    do i=1,npart
+       do k=1,dime
+          xp(k,i)=buffer(k,i)+dt*velop(k,i)
+       end do
+    end do
+    ! Vorticity mapping ...
+    !    call ppm_impose_part_bc(topoid,xp,npart,info) 
+    newNpart = 0
+    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    !call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(omp,dime,npart,info)    ! vorticity
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    npart = newNpart
+    ! Free memory as soon as possible ...
+    deallocate(buffer)
+
+  end subroutine RK2_2D
+  
+  !> Runge-Kutta 2 for positions, scalar-transport version, for 2D domains (i.e. rhs == 0).
+  subroutine RK2_2DScalar(dt,topoid,meshid,ghostsize,vel)
+    !> time step
+    real(mk), intent(in) ::dt
+    ! topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer,  dimension(:),pointer::ghostsize
+    ! velocity on grid (must be in PPM-style storage)
+    real(mk), dimension(:,:,:,:), pointer :: vel
+    
+    !> local loop indices
+    integer :: i, newNpart,k
+    real(mk) :: alpha
+    !> error status
+    integer :: info,np0
+    ! buffer stores the initial positions; it is of size (dime,npart)
+
+    allocate(buffer(dime,npart))
+    buffer_size=npart
+    
+    np0 = npart
+
+    ! First RK stage
+    ! Velocity is up to date, following the call to update_particles
+    alpha=0.5*dt
+    do i=1,npart
+       do k=1,dime
+          buffer(k,i)=xp(k,i)
+          xp(k,i)=buffer(k,i)+alpha*velop(k,i)
+       end do
+    end do
+    ! Particles positions have changed ... we must map between domains
+    !call ppm_impose_part_bc(topoid,xp,npart,info) ! Useless according to doc, required according to Omar ...
+    newNpart = 0
+    call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    !call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(scalar_p,npart,info)    ! scalar
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(scalar_p,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    ! Update sizes ...
+    npart = newNpart
+    !! Mesh to particles for the new particles positions ...
+    !> for velocity and rhs
+    call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+    !! Second RK2 stage, with the updated velocity
+    do i=1,npart
+       do k=1,dime
+          xp(k,i)=buffer(k,i)+dt*velop(k,i)
+       end do
+    end do
+ !   call ppm_impose_part_bc(topoid,xp,npart,info) 
+    newNpart = 0
+    !    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(scalar_p,npart,info)    ! scalar
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(scalar_p,npart,newNpart,info)
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    npart = newNpart
+  end subroutine RK2_2DScalar
+
+  !> One directional-splitting substep: Runge-Kutta 2 for the positions along direction dir, with the scalar simply transported.
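+  !! Sketch of one substep (advection along the single direction dir):
+  !!   x_dir* = x_dir^n + (dt/2) u_dir(x^n)      (then remap and interpolate)
+  !!   x_dir^{n+1} = x_dir^n + dt u_dir(x*)
+  !! The scalar carries no source term; it is simply remapped afterwards.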
+  subroutine push_split_particles(dir,dt,topoid,meshid,ghostsize,vel,coordMin,step)
+    !> splitting direction
+    integer, intent(in) :: dir
+    !> time step
+    real(mk), intent(in) ::dt
+    ! topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer,  dimension(:),pointer:: ghostsize
+    ! velocity on grid
+    real(mk), dimension(:,:,:,:), pointer :: vel
+    real(mk),dimension(dim3),intent(in) :: coordMin,step
+    !> local loop indices
+    integer :: i,newNpart
+    integer :: info
+
+    do i=1,npart
+       buffer(dir,i)=xp(dir,i)
+       xp(dir,i)=buffer(dir,i)+0.5*dt*velop(dir,i)
+    end do
+    ! Particles positions have changed ... we must map between domains
+    !    call ppm_impose_part_bc(topoid,xp,npart,info) ! Useless according to doc, required according to Omar ...
+    newNpart = 0
+    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    !call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(velop,dime,npart,info)    ! velocity
+    call ppm_map_part_push(scalar_p,npart,info)    ! scalar
+    call ppm_map_part_push(buffer,dime,npart,info)    
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(buffer,dime,npart,newNpart,info)
+    call ppm_map_part_pop(scalar_p,npart,newNpart,info)
+    call ppm_map_part_pop(velop,dime,npart,newNpart,info)  
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    ! Update sizes ...
+    npart = newNpart
+    !    if(size(rhsp,2).lt.npart) then
+
+    !> for velocity
+    !call ppm_interp_m2p(topoid,meshid,xp,npart,velop,dime,kernel,ghostsize,vel,info)
+    call mesh2particles(vel,coordMin,step,dir)
+    
+    do i=1,npart
+       xp(dir,i)=buffer(dir,i)+dt*velop(dir,i)
+    end do
+
+    ! Vorticity mapping ...
+    newNpart = 0
+    
+    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+    !call ppm_map_part_global(topoid,xp,npart,info) ! positions
+    call ppm_map_part_push(scalar_p,npart,info)    ! scalar
+    call ppm_map_part_send(npart,newNpart,info)          ! send
+    call ppm_map_part_pop(scalar_p,npart,newNpart,info)
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info) 
+    npart = newNpart
+  end subroutine push_split_particles
+  
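+  !> Interpolate the dir-component of the grid velocity onto the particles,
+  !! with linear interpolation along direction dir only:
+  !!   u_p = (1-d) u(i_left) + d u(i_left+1),   d = (x_p - x_left)/step(dir)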
+  subroutine mesh2particles(velocity,coordMin,step,dir)
+
+    real(mk),dimension(:,:,:,:),pointer :: velocity
+    integer, intent(in) :: dir
+    real(mk),dimension(dim3),intent(in) :: step
+    real(mk),dimension(dim3),intent(in) :: coordMin
+
+    real(mk) :: coord,dist
+    real(mk),dimension(2) :: weights
+    integer :: i,j
+    integer, dimension(2,dime) :: indGrid
+
+    do i = 1,npart
+       do j = 1,2
+          indGrid(j,:) = ((xp(:,i) - coordMin(:))/step(:) + epsilon(pi))+1
+       enddo
+       indGrid(2,dir) = indGrid(1,dir) + 1
+     
+       coord = coordMin(dir) + (indGrid(1,dir)-1)*step(dir)
+       dist = (xp(dir,i) - coord)/step(dir)
+       weights(1) = 1.-dist
+       weights(2) = dist
+       
+       velop(dir,i) =  weights(1)*velocity(dir,indGrid(1,1),indGrid(1,2),indGrid(1,3))&
+            + weights(2)*velocity(dir,indGrid(2,1),indGrid(2,2),indGrid(2,3))
+    end do
+
+  end subroutine mesh2particles
+
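+  !> Remesh the particle scalar onto the grid along direction dir with a
+  !! 6-point M'6-type kernel (one directional-splitting step). For a particle
+  !! at normalized distance 'dist' from its base stencil point indGrid(3,dir),
+  !! the six weights below are the kernel values at distances
+  !! dist+2, dist+1, dist, 1-dist, 2-dist and 3-dist.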
+  subroutine remesh_split_mp6(scalar,dir,step,coordMin)
+    real(mk),dimension(:,:,:), pointer :: scalar
+    integer, intent(in) :: dir
+    real(mk),dimension(dim3),intent(in) :: step
+    real(mk),dimension(dim3),intent(in) :: coordMin
+
+    real(mk) :: coord,dist
+    real(mk),dimension(6) :: val,weights
+    integer :: i,j
+
+    integer, dimension(6,dime) :: indGrid
+
+    scalar = 0.0
+
+    do i = 1,npart
+       do j = 1,6
+          indGrid(j,:) = ((xp(:,i) - coordMin(:))/step(:) + epsilon(pi))+1
+       enddo
+       indGrid(1,dir) = indGrid(3,dir) - 2
+       indGrid(2,dir) = indGrid(3,dir) - 1
+       indGrid(4,dir) = indGrid(3,dir) + 1
+       indGrid(5,dir) = indGrid(3,dir) + 2
+       indGrid(6,dir) = indGrid(3,dir) + 3
+
+       coord = coordMin(dir) + (indGrid(3,dir)-1)*step(dir)
+       dist = (xp(dir,i) - coord)/step(dir)
+       weights(1) = -(dist)*(5.*(dist + 2.)-8.)*(dist - 1.)**3/24.
+       weights(2) = (dist)*(dist - 1.)*(25.*(dist + 1.)**3-114.*(dist + 1.)**2+153.*(dist + 1.)-48.)/24.
+       weights(3) = -(dist-1.)*(25.*dist**4-38.*dist**3-3.*dist**2+12.*dist+12)/12.
+       weights(4) = (dist)*(25.*(1. - dist)**4-38.*(1. - dist)**3-3.*(1. - dist)**2+12.*(1. - dist)+12)/12.
+       weights(5) = (1. - dist)*(-dist)*(25.*(2. - dist)**3-114.*(2. - dist)**2+153.*(2. - dist)-48.)/24.
+       weights(6) = -(1. - dist)*(5.*(3. - dist)-8.)*(-dist)**3/24.
+
+       val(:) = scalar_p(i)*weights(:)
+       do j = 1, 6
+          scalar(indGrid(j,c_X),indGrid(j,c_Y),indGrid(j,c_Z)) = &
+               scalar(indGrid(j,c_X),indGrid(j,c_Y),indGrid(j,c_Z)) + val(j)
+       end do
+
+    end do
+ 
+  end subroutine remesh_split_mp6
+  
+
+  !> Return the memory (in MB) used to store the variables attached to particles
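+  !! Note: 'sizeof' used below is a compiler extension (gfortran/ifort), not
+  !! standard Fortran; the byte count is converted to MB.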
+  function getMemoryUsedForParticles()
+    real(mk) :: getMemoryUsedForParticles
+    
+    getMemoryUsedForParticles = sizeof(xp)+sizeof(velop)+sizeof(rhsp)+sizeof(buffer)+&
+         sizeof(buffer2)+sizeof(buffer3)
+    getMemoryUsedForParticles = getMemoryUsedForParticles*1e-6
+    if(verbose) then
+       write(*,'(a,i3,a,f10.4,a)') &
+            '[',rank,'] memory used for particles:', getMemoryUsedForParticles, ' MB.'
+    end if
+
+  end function getMemoryUsedForParticles
+
+  !> Create particles at the grid points where cutoff(1) < |refField| < cutoff(2).
+  subroutine countAndCreateParticles(refField,velocity,resolution,step,coordMin)
+    
+    !> Field (on grid) used to set particles
+    real(mk), dimension(:,:,:,:), pointer :: refField
+    !> velocity on the grid
+    real(mk), dimension(:,:,:,:), pointer :: velocity
+    !> Local resolution
+    integer, dimension(dim3),intent(in) :: resolution
+    !> Space step
+    real(mk), dimension(dim3),intent(in) :: step
+    !> Coordinates of the lowest point of the current domain
+    real(mk), dimension(dim3),intent(in) :: coordMin
+    
+    ! particles counter
+    integer :: count
+    ! norm of refField at each point of the grid
+    real(mk) :: strength
+    ! coordinates
+    real(mk), dimension(dim3) :: coord
+    integer, dimension(dim3) :: nbCells
+    
+    integer :: i,j,k
+    
+    nbCells = max(resolution-1,1)
+
+    print *, "uuuu",maxval(refField)
+    
+    ! Count the number of particles within cutoff bounds.
+    ! Loop over grid points. We use 'max' to allow the '1 point in one direction' case 
+    ! (i.e. to deal with 2D case in a 3D context)
+    ! Remark : there is no resize function in fortran so we must count first, allocate and then fill fields. 
+    count = 0
+
+    print *, "str", cutoff
+   do k=1,nbCells(c_Z)
+       do j=1,nbCells(c_Y)
+          do i=1,nbCells(c_X)
+             strength = sqrt(sum(refField(:,i,j,k)**2))
+             if((strength.gt.cutoff(1)).and.(strength.lt.cutoff(2))) then
+                count = count + 1
+             end if
+          end do
+       end do
+    end do
+    
+    ! Allocations
+    allocate(xp(dime,count),omp(dime,count),velop(dime,count))
+    
+    print *, 'nb parts ...', count, shape(xp)
+
+    ! and set values
+    coord = coordMin
+    count = 0
+    do k=1,nbCells(c_Z)
+       do j=1,nbCells(c_Y)
+          do i=1,nbCells(c_X)
+             strength = sqrt(sum(refField(:,i,j,k)**2))
+             if((strength.gt.cutoff(1)).and.(strength.lt.cutoff(2))) then
+                count = count + 1
+                omp(:,count) = refField(:,i,j,k)
+                velop(:,count) = velocity(:,i,j,k)
+                xp(:,count) = coord(1:dime)
+             end if
+             coord(c_X) = coord(c_X) + step(c_X)
+          end do
+          coord(c_X) = coordMin(c_X)
+          coord(c_Y) = coord(c_Y) + step(c_Y)
+       end do
+       coord(c_Y) = coordMin(c_Y)
+       coord(c_Z) = coord(c_Z) + step(c_Z)
+    end do
+    
+    npart = count
+    write(*,'(a,i5,a,i8,a)') '[',rank,'] initialisation with ', npart,' particles'
+
+  end subroutine countAndCreateParticles
+
+  !> Update particles on the grid points where refField>cutoff.
+  !! Note: deallocation/reallocation occurs if the number of particles has increased.
+  subroutine countAndUpdateParticles(refField,velocity,resolution,step,coordMin)
+    
+    !> Field (on grid) used to set particles
+    real(mk), dimension(:,:,:,:), pointer :: refField
+    !> velocity on the grid
+    real(mk), dimension(:,:,:,:), pointer :: velocity
+    !> Local resolution
+    integer, dimension(dim3),intent(in) :: resolution
+    !> Space step
+    real(mk), dimension(dim3),intent(in) :: step
+    !> Coordinates of the lowest point of the current domain
+    real(mk), dimension(dim3),intent(in) :: coordMin
+    
+    ! particles counter
+    integer :: count
+    ! norm of refField at each point of the grid
+    real(mk) :: strength
+    ! coordinates
+    real(mk), dimension(dim3) :: coord
+    integer, dimension(dim3) :: nbCells
+    integer :: i,j,k
+       
+    nbCells = max(resolution-1,1)
+    
+    ! Count the number of particles within cutoff bounds.
+    ! Loop over grid points. We use 'max' to allow the '1 point in one direction' case 
+    ! (i.e. to deal with 2D case in a 3D context)
+    ! Remark : there is no resize function in fortran so we must count first, allocate and then fill fields. 
+    count = 0
+    do k=1,nbCells(c_Z)
+       do j=1,nbCells(c_Y)
+          do i=1,nbCells(c_X)
+             strength = sqrt(sum(refField(:,i,j,k)**2))
+             if((strength.gt.cutoff(1)).and.(strength.lt.cutoff(2))) then
+                count = count + 1
+             end if
+          end do
+       end do
+    end do
+    
+    ! Check if reallocation is required
+    if(count > npart) then
+       ! Free old memory
+       if(associated(xp)) deallocate(xp)
+       if(associated(omp)) deallocate(omp)
+       if(associated(velop)) deallocate(velop)
+    
+       ! Allocations
+       allocate(xp(dime,count),omp(dime,count),velop(dime,count))
+    end if
+
+    ! and set values
+    coord = coordMin
+    count = 0
+    do k=1,nbCells(c_Z)
+       do j=1,nbCells(c_Y)
+          do i=1,nbCells(c_X)
+             strength = sqrt(sum(refField(:,i,j,k)**2))
+             if((strength.gt.cutoff(1)).and.(strength.lt.cutoff(2))) then
+                count = count + 1
+                omp(:,count) = refField(:,i,j,k)
+                velop(:,count) = velocity(:,i,j,k)
+                xp(:,count) = coord
+             end if
+             coord(c_X) = coord(c_X) + step(c_X)
+          end do
+          coord(c_X) = coordMin(c_X)
+          coord(c_Y) = coord(c_Y) + step(c_Y)
+       end do
+       coord(c_Y) = coordMin(c_Y)
+       coord(c_Z) = coord(c_Z) + step(c_Z)
+    end do
+    
+    npart = count
+    write(*,'(a,i5,a,i8,a)') '[',rank,'] update with ', npart,' particles'
+  end subroutine countAndUpdateParticles
+
+  !> Create particles at every grid point (no cutoff).
+  subroutine createParticlesEverywhere(refField,velocity,resolution,step,coordMin)
+    
+    !> Field (on grid) used to set particles
+    real(mk), dimension(:,:,:,:), pointer :: refField
+    !> velocity on the grid
+    real(mk), dimension(:,:,:,:), pointer :: velocity
+    !> Local resolution
+    integer, dimension(dim3),intent(in) :: resolution
+    !> Space step
+    real(mk), dimension(dim3),intent(in) :: step
+    !> Coordinates of the lowest point of the current domain
+    real(mk), dimension(dim3),intent(in) :: coordMin
+    
+    ! coordinates
+    real(mk), dimension(dim3) :: coord
+    integer, dimension(dim3) :: nbCells
+    integer :: i,j,k,count
+    
+    nbCells = max(resolution-1,1)
+    coord = coordMin
+    count = 0
+    npart = product(nbCells)
+    ! Allocations
+    allocate(xp(dime,npart),omp(dime,npart),velop(dime,npart))
+    do k=1,nbCells(c_Z)
+       do j=1,nbCells(c_Y)
+          do i=1,nbCells(c_X)
+             count = count + 1
+             omp(:,count) = refField(:,i,j,k)
+             velop(:,count) = velocity(:,i,j,k)
+             xp(:,count) = coord
+             coord(c_X) = coord(c_X) + step(c_X)
+          end do
+          coord(c_X) = coordMin(c_X)
+          coord(c_Y) = coord(c_Y) + step(c_Y)
+       end do
+       coord(c_Y) = coordMin(c_Y)
+       coord(c_Z) = coord(c_Z) + step(c_Z)
+    end do
+    
+    write(*,'(a,i5,a,i8,a)') '[',rank,'] initialisation with ', npart,' particles'
+
+  end subroutine createParticlesEverywhere
+  
+  !> Create particles at every grid point, carrying the scalar (no cutoff).
+  subroutine createParticlesEverywhereScalar(refField,velocity,resolution,step,coordMin)
+    
+    !> Field (on grid) used to set particles
+    real(mk), dimension(:,:,:), pointer :: refField
+    !> velocity on the grid
+    real(mk), dimension(:,:,:,:), pointer :: velocity
+    !> Local resolution
+    integer, dimension(dim3),intent(in) :: resolution
+    !> Space step
+    real(mk), dimension(dim3),intent(in) :: step
+    !> Coordinates of the lowest point of the current domain
+    real(mk), dimension(dim3),intent(in) :: coordMin
+    
+    ! coordinates
+    real(mk), dimension(dim3) :: coord
+    integer, dimension(dim3) :: nbCells
+    integer :: i,j,k,count
+    
+    nbCells = max(resolution-1,1)
+    coord = coordMin
+    count = 0
+    npart = product(nbCells)
+    ! Allocations
+    allocate(xp(dime,npart),scalar_p(npart),velop(dime,npart))
+    do k=1,nbCells(c_Z)
+       do j=1,nbCells(c_Y)
+          do i=1,nbCells(c_X)
+             count = count + 1
+             scalar_p(count) = refField(i,j,k)
+             velop(:,count) = velocity(:,i,j,k)
+             xp(:,count) = coord
+             coord(c_X) = coord(c_X) + step(c_X)
+          end do
+          coord(c_X) = coordMin(c_X)
+          coord(c_Y) = coord(c_Y) + step(c_Y)
+       end do
+       coord(c_Y) = coordMin(c_Y)
+       coord(c_Z) = coord(c_Z) + step(c_Z)
+    end do
+    
+    write(*,'(a,i5,a,i8,a)') '[',rank,'] initialisation with ', npart,' particles'
+    
+    open(45,file="scalp") ! Debug output; only meaningful when running on a single process
+    do i = 1,npart
+       write(45,'(6e14.5)') xp(c_X,i),scalar_p(i)
+    end do
+    close(45)
+  end subroutine createParticlesEverywhereScalar
+
+
+  !> Free all arrays carried by particles.
+  !! Useful only for tests.
+  ! --------> is it required to call specific routines to clean up anything related to particles at the end of the simulation,
+  ! or does ppm_finalize do it all?
+  ! According to Omar: do not use ppm "internal" routines, clean it up yourself.
+  subroutine freeParticles()
+    if(associated(xp)) deallocate(xp)
+    if(associated(omp)) deallocate(omp)
+    if(associated(velop)) deallocate(velop)
+    if(associated(rhsp)) deallocate(rhsp)
+    if(associated(buffer)) deallocate(buffer)
+    if(associated(buffer2)) deallocate(buffer2)
+    if(associated(buffer3)) deallocate(buffer3)
+    if(associated(scalar_p)) deallocate(scalar_p)
+    npart = 0
+    buffer_size = 0
+  end subroutine freeParticles
+
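+  !> Remesh the particle scalar onto a 2D grid with a tensor product of the
+  !! 4-point M'4 kernel,
+  !!   M'4(x) = 1 - 5x**2/2 + 3|x|**3/2     for |x| <= 1,
+  !!   M'4(x) = (2-|x|)**2 (1-|x|) / 2      for 1 <= |x| <= 2;
+  !! weights(1:4,:) below are its values at distances 1+d, d, 1-d and 2-d
+  !! from the four stencil points.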
+  subroutine remesh2D(field,coordMin,step,resolution)
+    
+    real(mk),dimension(:,:,:),pointer :: field
+    real(mk),dimension(dim3),intent(in) :: coordMin
+    real(mk),dimension(dim3),intent(in) :: step
+    integer, dimension(dim3),intent(in) :: resolution
+
+    real(mk),dimension(dime) :: invStep
+    !! List of grid point indices for each particle : [Left-1 Left Right Right+1]
+    !! Left/Right == left/right points of the grid around the particle
+    integer, dimension(4,dime) :: indGrid
+    !! current point coordinates and distance to the left boundary of the local domain
+    real(mk),dimension(dime) :: coord,dist
+    !! weights
+    real(mk),dimension(4,dime) :: weights
+    integer:: i,j
+    invStep = 1./step(1:dime)
+    do i = 1,npart
+       indGrid(2,:) = ((xp(:,i) - coordMin(:))*invStep(:) + epsilon(pi))+1
+       indGrid(1,:) = indGrid(2,:) - 1
+       indGrid(3,:) = indGrid(2,:) + 1
+       indGrid(4,:) = indGrid(2,:) + 2
+       !enforce periodicity
+       do j =1,4
+          indGrid(j,:) = mod(indGrid(j,:)+resolution(1:dime),resolution(1:dime))+1
+       end do
+
+       coord = coordMin(1:dime) + (indGrid(2,:)-1)*step(1:dime)
+       dist = (xp(:,i) - coord)*invStep
+       
+       weights(1,:) = -0.5*dist*((1.-dist)**2)
+       weights(2,:) = 1.-2.5*dist**2+1.5*dist**3
+       weights(3,:) = 1.-2.5*(1.-dist)**2+1.5*(1.-dist)**3
+       weights(4,:) = 0.5*dist**2*(dist-1.)
+       
+       do j =1,4
+          field(indGrid(j,c_X),indGrid(:,c_Y),1) = field(indGrid(j,c_X),indGrid(:,c_Y),1)&
+               + weights(j,c_X)*weights(:,c_Y)*scalar_p(i)
+       end do
+
+    end do
+    
+  end subroutine remesh2D
+
+
+
+end module Particles
diff --git a/HySoP/src/ppmInterface/Solver.f90 b/HySoP/src/ppmInterface/Solver.f90
new file mode 100755
index 0000000000000000000000000000000000000000..532f0e514190160f32c65233e01a53addc3d772e
--- /dev/null
+++ b/HySoP/src/ppmInterface/Solver.f90
@@ -0,0 +1,164 @@
+!> Poisson Solver on the grid
+!
+! Notes Franck concerning the multigrid solver:
+! - the old (before Jan 2012) version of this solver was buggy in ppm. I keep the Parmes routines calling this solver
+! in the present file for future debugging, but they are not (really not ...) usable.
+
+module Solver
+
+  use client_data, only: mk, dime,rank
+!!!!!!! Comment to remove ppm  use ppm_module_numerics_data, only: ppm_param_eq_poisson, ppm_param_smooth_rbsor
+!!!!!!! Comment to remove ppm  use ppm_module_poisson, only: ppm_poisson_init, ppm_poisson_plan, ppm_poisson_grn_pois_per, ppm_poisson_solve, &
+!!!!!!! Comment to remove ppm  ppm_poisson_drv_curl_fd2, ppm_poisson_drv_curl_fd4, ppm_poisson_drv_none
+!!!!!!! Comment to remove ppm  use client_topology
+!!!!!!! Comment to remove ppm  use ppm_module_typedef, only : ppm_param_mesh_coarsen
+!!!!!!! Comment to remove ppm  use ppm_module_mesh_derive, only: ppm_mesh_derive
+!  use ppmljk_poisson
+  
+  !! All multigrid stuff ...
+  !!use ppm_module_mg_init
+  !!use ppm_module_mg_solv
+
+  implicit none
+
+  private
+
+  public :: init_poisson_solver, solve_poisson!!!, ppm_poisson_drv_curl_fd2, ppm_poisson_drv_curl_fd4, ppm_poisson_drv_none
+
+  type(ppm_poisson_plan),pointer :: fftwplanForppm => NULL()
+
+  ! Interface to the poisson solver initialisation. Only fft for the time being.
+  interface init_poisson_solver
+     module procedure init_fftw
+  end interface
+
+  interface solve_poisson
+     module procedure solve_poisson_fftw
+  end interface
+
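+  ! Typical usage (a sketch; 'rhs' and 'vel' denote fields in PPM 5D storage,
+  ! ids coming from the client topology module):
+  !   call init_poisson_solver(rhs, vel, topoid, meshid)
+  !   call solve_poisson(rhs, vel, topoid, meshid, ghostsize)
+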
+contains
+  
+  !> Does not work: bug in ppm numerics ...
+!!$  subroutine init_multigrid(topoid, meshid, ghostsize, bc)
+!!$    
+!!$    integer, intent(in) :: topoid
+!!$    integer, intent(in) :: meshid
+!!$    integer, dimension(:), pointer :: ghostsize
+!!$    integer, dimension(:), pointer :: bc
+!!$
+!!$    integer,  dimension(dime,2*dime):: ibcdef
+!!$    real(mk), dimension(dime,1,1,1,1):: ibcvalue
+!!$
+!!$    logical :: with_w_cycles, dump_info
+!!$    real(mk) :: omega
+!!$    integer :: limlev ! Number of mg levels
+!!$    integer :: info
+!!$
+!!$    info = -1
+!!$    limlev = 2 ! Warning, limlev must cope with grid resolution ...
+!!$    omega = 1.15_mk ! Relaxation param
+!!$    with_w_cycles = .FALSE.
+!!$    dump_info = .TRUE.
+!!$    ibcvalue(:,:,:,:,:)=0.0_mk
+!!$    ibcdef(1,:)=bc(:)
+!!$    ibcdef(2,:)=bc(:)
+!!$    ibcdef(3,:)=bc(:)
+!!$    ! Initialize the solver
+!!$    !! ppm_param_eq_poisson : equation type. Poisson is the only possible type ...
+!!$    !! ppm_param_smooth_rbsor : Gauss Seidel, only possible type
+!!$    ! Anyway, at the time the two parameters above are not used in ppm routine. 
+!!$    !! maxlev : number of levels in multigrid
+!!$    !! 
+!!$    
+!!$    call ppm_mg_init(topoid, ppm_param_eq_poisson,ghostsize, ppm_param_smooth_rbsor,dime,ibcdef,&
+!!$         ibcvalue,meshid,limlev,with_w_cycles,dump_info,omega,info)
+!!$   
+!!$    if(info.ne.0) stop 'Init_multigrid failed ...'
+!!$    
+!!$  end subroutine init_multigrid
+!!$
+!!$  !> Does not work
+!!$  subroutine solve_poisson_multigrid(topoid, field, rhs)
+!!$    integer, intent(in) :: topoid
+!!$    real(mk), dimension(:,:,:,:,:), pointer :: field
+!!$    real(mk), dimension(:,:,:,:,:), pointer :: rhs
+!!$
+!!$    integer :: itera, iterf, iter1, iter2, info
+!!$    real(mk) :: Eu
+!!$    
+!!$    itera = 3
+!!$    iterf = 3
+!!$    iter1 = 10
+!!$    iter2 = 4
+!!$    print *, 'solve ...', topoid
+!!$
+!!$    ! Bug inside ...
+!!$    call ppm_mg_solv(topoid, field, rhs, dime, itera, iterf, iter1, iter2, Eu, info)
+!!$
+!!$  end subroutine solve_poisson_multigrid
+
+  !> Init fftw through ppm routines
+  subroutine init_fftw(fieldin,fieldout,topoid,meshid,deriveValue)
+    
+    real(mk), dimension(:,:,:,:,:), pointer :: fieldin, fieldout
+    integer, intent(in) :: topoid, meshid
+    integer, intent(in), optional :: deriveValue
+    
+    ! Parameters for ppm poisson routine
+    integer :: info
+    
+    ! Flag to select built-in Green functions (...)
+    integer :: green
+    integer :: der
+    
+    if (present(deriveValue)) then
+       der = deriveValue
+    else
+       der = ppm_poisson_drv_curl_fd4
+    end if
+    
+    green = ppm_poisson_grn_pois_per ! periodic boundaries
+    info = -1
+    allocate(fftwplanForppm)
+    ! Call ppm routine to initialize fftw plan. 
+    call ppm_poisson_init(topoid,meshid,fftwplanForppm,fieldin,fieldout,green,info,derive=der)
+    !call mypoisson_init(topoid,meshid,fftwplanForppm,fieldin,fieldout,info)
+    
+    if(info.NE.0) stop 'PPM Poisson solver init failed.'
+  end subroutine init_fftw
+
+  !> Interface to ppm poisson solver with fftw
+  subroutine solve_poisson_fftw(fieldin,fieldout,topoid,meshid,ghostsize)
+    
+    real(mk), dimension(:,:,:,:,:), pointer ::  fieldin, fieldout
+    integer, intent(in) :: meshid, topoid
+    integer, dimension(:),pointer::ghostsize
+
+    integer :: info
+    ! Finite diff scheme used to compute curl
+    !integer :: dtype
+
+    info = -1
+    
+    ! Solves laplacian(fieldout) = - fieldin
+    call ppm_poisson_solve(topoid,meshid,fftwplanForppm,fieldin,fieldout,ghostsize,info)
+    !call mypoisson_solve(topoid,meshid,fftwplanForppm,fieldin,fieldout,ghostsize,info)
+    if(info.NE.0) stop 'PPM Poisson solver failed.'
+    
+    ! info = -1
+    ! Computes fieldout = curl(fieldout) using finite differences, 2nd order (dtype = ppm_poisson_drv_curl_fd2), 
+    ! or 4th order (dtype = ppm_poisson_drv_curl_fd4). Last one is untested according to ppm doc ...
+    ! According to Johannes, not needed if I set derive in poisson_init
+    !call ppm_poisson_fd(topoid,meshid,fieldout,fieldout,dtype,info)
+    !if(info.NE.0) stop 'PPM Poisson, curl computation failed.'
+
+  end subroutine solve_poisson_fftw
+
+  subroutine finalize_poisson_fftw()
+    ! TODO ...
+    ! In ppm, nothing is done to free the fftw plan or other temporary buffers.
+  end subroutine finalize_poisson_fftw
+  
+end module Solver
+
+
diff --git a/HySoP/src/ppmInterface/Topology.f90 b/HySoP/src/ppmInterface/Topology.f90
new file mode 100755
index 0000000000000000000000000000000000000000..77147a660ee6e26b26ceb3816461289557339b49
--- /dev/null
+++ b/HySoP/src/ppmInterface/Topology.f90
@@ -0,0 +1,225 @@
+!> Tools to create and init the topology
+!! Based on ppm
+module client_topology
+  
+  use ppm_module_typedef, only : ppm_t_topo
+  use ppm_module_mktopo
+  use ppm_module_topo_get
+  use ppm_module_mesh_define
+  use ppm_module_data, only : ppm_param_assign_internal,ppm_param_decomp_cartesian,ppm_topo,&
+       ppm_param_decomp_xpencil,ppm_param_decomp_ypencil,ppm_param_decomp_zpencil
+
+  use client_data, only : mk,dime,dim3
+  use parmesTools
+
+  implicit none
+
+  !> A pointer to a ppm topology
+  type :: topoPtr
+     !> pointer to ppm topo
+     type(ppm_t_topo), pointer :: ptr
+     !> id of the local mesh on the current topo (Warning : we consider only 1 mesh/topo, at the present time)
+     integer :: meshid
+  end type topoPtr
+
+  !> A pointer to ppm topology object 
+  type(ppm_t_topo), pointer :: topo => null()
+  !> Array of 1D topologies for FFTW solvers
+  type(topoPtr), dimension(dime) :: topo1D
+  !> 1D topology for FFTW solvers
+  type(ppm_t_topo), pointer :: topoY => NULL()
+  !> number of sub-domains on the current (mpi)proc. This is for ppm and should be 1 in our case. 
+  integer :: nsublist
+  !> global id of the sub-domain handled by the current mpi process, according to ppm. May be different from rank and is required.
+  integer :: isubl
+  !> local mesh number
+  integer :: meshNum
+
+  private
+
+  public PPMinitTopo, topo, meshNum, ppm_t_topo, getPPMLocalResolution,nsublist,isubl,createTopologyY,topoY
+
+contains
+
+  !> Create the topology - Based on ppm_topo_mkfield routine (interface = ppm_mktopo)
+  subroutine PPMinitTopo(minPos,maxPos,bc,ghostsize,resolution)
+
+    real(mk), dimension(:),pointer :: minpos,maxpos
+    integer,  dimension(:),pointer :: bc
+    integer,  dimension(:),pointer :: ghostsize
+    integer,  dimension(:),pointer :: resolution
+    
+    ! Unused parameter that must be present as an input arg ...
+    real(mk), dimension(:,:),pointer :: false_xp => NULL()
+    real(mk), dimension(:),pointer :: subcost => NULL()
+    integer :: false_np
+    integer :: assigning, decomposition
+    integer :: info, meshid, topoid
+
+    info = -1
+    topoid=0
+    meshid =-1
+    decomposition = ppm_param_decomp_cartesian  
+    assigning     = ppm_param_assign_internal
+    false_np = -1 ! Must be negative to have purely mesh-based decomposition
+    ! topology
+    ! Note FP: ghostsize is never used in mktopo for decomp_cartesian.
+    ! Note2 FP: struct topo as input results in failure. 
+
+    call ppm_mktopo(topoid,meshid,false_xp,false_np,decomposition,assigning,minPos(1:dime),maxPos(1:dime),bc, & 
+         ghostsize,subcost,resolution(1:dime),info)
+    !decomposition = ppm_param_decomp_xy_slab
+    !ghostsize =0.0
+    !CALL ppm_mktopo(topoid,meshid,false_xp,0,decomposition,assigning,minPos,maxPos,bc,&
+    !     ghostsize,subcost,resolution,info)
+    
+    !meshid2=-1
+    !nmxyc=resolution
+    !nmxyc(1)=(resolution(1)-1)/2 +1
+    !CALL ppm_mesh_define(topoid,meshid2,nmxyc,istartxyc,ndataxyc,info)
+
+    !print *, 'iaiaaoaioaioa', ppm_topo(topoid)%t%mesh(meshid2)%nnodes
+    if(info.ne.0) stop 'Topology:init initialisation failed'
+
+    ! Get the created topology ...
+    topo=>ppm_topo(topoid)%t
+    
+    ! We init ppm numbers for sub domains
+    nsublist=topo%nsublist
+    call parmesAssert(nsublist,1,'there are several sub-domains on the current process and that is not allowed.')
+    !> Global number (for ppm) of the sub-domain handled by the current process. May be different from rank.
+    isubl = topo%isublist(nsublist)
+    !! If we need nsublist > 1, every place where isubl is used should look like :
+    ! do isub=1,nsublist
+    !   isubl=topo%isublist(isub)
+    !   call something(...,isubl,...)
+    ! enddo
+ 
+    ! we suppose that there is only one mesh created on each subdomain
+    meshNum = topo%max_meshid
+    call parmesAssert(meshNum,1,&
+         'it seems that several meshes are defined on the current sub-domain, which may result in simulation failure.')
+
+    print *, rank, "AAAAAAAAAAAAAA", shape(topo%mesh(meshNum)%nnodes), "nbnbnb", topo%mesh(meshNum)%nnodes
+    
+!!$    call ppm_topo_get_meshinfo(topoid,meshid,nm,istart,ndata,maxndata,isublist,nsublist,info)
+!!$    
+!!$    print *, 'nm', nm
+!!$    print *, 'istart', istart
+!!$    print *, 'ndata', ndata
+!!$    print *, 'maxndata',maxndata
+!!$    print *, 'isublist', isublist
+!!$    print *, 'nsublist',nsublist
+    ! Notes Franck:
+    ! nsublist is the number of subdomains for one process (mpi proc, not physical proc ...)
+    ! isublist[i] = global number of the subdomain i (i being the local number)
+    ! Warning: it seems that isublist is of size mpi-number-of-procs, but only the 1:nsublist elements are relevant
+    ! sub2proc[i] = global number of the process to which subdomain i is attached
+    ! 
+!!$    
+!!    print *, '[', rank,'], subs :  ', topo%nsublist, '//', shape(topo%isublist),'//', topo%isublist(1)
+!!$    print *, '======================= [', rank,'] ', shape(topo%sub2proc)
+!!$    print *, '======================= [', rank,'] ', topo%sub2proc
+!!$    
+!!$    
+!!$    print *, '======================= [', rank,'] ', shape(topo%mesh(1)%nnodes)
+!!$    print *, '======================= [', rank,'] ', topo%mesh(1)%nnodes
+!!$    print *, '======================= [', rank,'] ', shape(topo%mesh(1)%istart)
+!!$    print *, '======================= [', rank,'] ', topo%mesh(1)%Nm
+
+  end subroutine PPMinitTopo
+
+  !> Return the local (mpi) subdomain resolution
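+  !! The result is always of size dim3; in a 2D run the third component is
+  !! left equal to 1.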
+  function getPPMLocalResolution(localTopo,meshid)
+    
+    !> topology of interest
+    type(ppm_t_topo), pointer :: localTopo
+    !> id of the mesh from which we need to get the resolution
+    integer, intent(in) :: meshid
+    integer,dimension(dim3) :: getPPMLocalResolution
+
+    getPPMLocalResolution = 1
+    getPPMLocalResolution(1:dime) = maxval(localTopo%mesh(meshid)%nnodes(:,localTopo%isublist(1:localTopo%nsublist)),2) 
+
+  end function getPPMLocalResolution
+
+  !> 1D topologies creation, for fftw solvers
+  !! 
+  subroutine create1DTopologies(minPos,maxPos,bc,resolution)
+    
+    real(mk), dimension(:),pointer :: minpos,maxpos
+    integer,  dimension(:),pointer :: bc
+    integer,  dimension(:),pointer :: resolution
+    
+    ! Unused parameter that must be present as an input arg ...
+    real(mk), dimension(:,:),pointer :: false_xp => NULL()
+    real(mk), dimension(:),pointer :: subcost => NULL()
+    integer :: false_np
+    integer :: assigning
+    integer, dimension(dim3) :: decomposition
+    integer :: info, meshid, topoid,i
+    integer,dimension(dim3), parameter :: zeros = (/0,0,0/)
+    
+    info = 0
+    decomposition(c_X) = ppm_param_decomp_xpencil
+    decomposition(c_Y) = ppm_param_decomp_ypencil
+    decomposition(c_Z) = ppm_param_decomp_zpencil
+    assigning     = ppm_param_assign_internal
+    false_np = 0 ! Purely mesh-based decomposition
+    
+    ! No ghosts for these topologies
+
+    ! loop over dimensions
+    do i=1,dime
+       topoid = 0
+       meshid = -1
+       call ppm_mktopo(topoid,meshid,false_xp,false_np,decomposition(i),assigning,minpos(1:dime),maxpos(1:dime),bc,&
+            zeros(1:dime),subcost,resolution(1:dime),info)
+       topo1D(i)%ptr=>ppm_topo(topoid)%t
+       topo1D(i)%meshid = meshid
+    end do
+
+    print *,"iijiji", topo1D(c_X)%ptr%mesh(meshid)%nnodes
+    
+  end subroutine create1DTopologies
+
+  !> Pencil topology creation, for fftw solvers
+  !!
+  subroutine createTopologyY(topoid,meshid,minPos,maxPos,bc,resolution)
+    
+    real(mk), dimension(:),pointer :: minpos,maxpos
+    integer,  dimension(:),pointer :: bc
+    integer,  dimension(:),pointer :: resolution
+    integer, intent(inout) :: topoid
+    integer, intent(inout) :: meshid
+
+    ! Unused parameter that must be present as an input arg ...
+    real(mk), dimension(:,:),pointer :: false_xp => NULL()
+    real(mk), dimension(:),pointer :: subcost => NULL()
+    integer :: false_np
+    integer :: assigning
+    integer  :: decomposition
+    integer :: info
+    integer,dimension(dim3), parameter :: zeros = (/0,0,0/)
+    
+    info = 0
+
+    decomposition = ppm_param_decomp_xpencil
+    assigning     = ppm_param_assign_internal
+    false_np = 0 ! Purely mesh-based decomposition
+    
+    ! No ghosts for these topologies
+    ! loop over dimensions
+    topoid = 0
+    meshid = -1
+    call ppm_mktopo(topoid,meshid,false_xp,false_np,decomposition,assigning,minpos(1:dime),maxpos(1:dime),bc,&
+         zeros(1:dime),subcost,resolution(1:dime),info)
+    topoY=>ppm_topo(topoid)%t
+    
+    print *,rank,"iijiji", topoY%mesh(meshid)%nnodes
+    print *,rank,"iooooooooijiji", getPPMLocalResolution(topoY,meshid)
+    
+  end subroutine createTopologyY
+
+
+end module client_topology
diff --git a/HySoP/src/ppmInterface/callCharFunc.f90 b/HySoP/src/ppmInterface/callCharFunc.f90
new file mode 100644
index 0000000000000000000000000000000000000000..9ba929739ea6b8cafb589d87b62c7f2393242ca5
--- /dev/null
+++ b/HySoP/src/ppmInterface/callCharFunc.f90
@@ -0,0 +1,39 @@
+!> \file callCharFunc.f90 Wrapper to fortran functions that have a char-type input argument.
+
+!> \brief Module to wrap fortran functions with char arg.
+module charFunctions
+
+  use ppm_module_substart
+  use ppm_module_substop
+  
+  implicit none
+
+contains
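+
+  ! Typical timing pattern (a sketch; 'my_routine' is a placeholder name):
+  !   real(8) :: t0
+  !   integer :: info
+  !   call start(t0, info, 'my_routine')
+  !   ! ... work to be timed ...
+  !   call stop(t0, info, 'my_routine')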
+
+  !> Wrapper to the ppm substart function
+  !! @param[in,out] t0 cpu time when this function is called
+  !! @param[in,out] info error status
+  !! @param[in] msg name of the calling function
+  subroutine start(t0,info,msg)
+
+    real(8) :: t0
+    character(len=*) :: msg
+    integer :: info
+    call substart(msg,t0,info)
+  end subroutine start
+  
+  !> Wrapper to the ppm substop function: prints the caller name and the cpu time elapsed since the matching start call
+  !! @param[in] t0 cpu time, as returned by the start call
+  !! @param[in,out] info error status
+  !! @param[in] msg name of the calling function
+  subroutine stop(t0,info,msg)
+
+    real(8) :: t0
+    character(len=*) :: msg
+    integer :: info
+
+    call substop(msg,t0,info)
+  end subroutine stop
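+
+  ! Illustrative usage sketch (not part of the ppm API; 'my_solver' is a
+  ! hypothetical caller name): a timed section is typically bracketed as
+  !   real(8) :: t0
+  !   integer :: info
+  !   call start(t0, info, 'my_solver')
+  !   ! ... timed work ...
+  !   call stop(t0, info, 'my_solver')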
+
+end module charFunctions
+
diff --git a/HySoP/src/ppmInterface/poisson_init.f90 b/HySoP/src/ppmInterface/poisson_init.f90
new file mode 100755
index 0000000000000000000000000000000000000000..42ad26088b3b3c18cdaa7c42326691da3357cde1
--- /dev/null
+++ b/HySoP/src/ppmInterface/poisson_init.f90
@@ -0,0 +1,597 @@
+module ppmljk_poisson
+
+  use client_data
+  use ppm_module_poisson
+  use ppm_module_mktopo
+  use ppm_module_topo_get
+  use ppm_module_mesh_define
+  use ppm_module_map_field
+  use ppm_module_map_field_global
+  use ppm_module_map
+  use mpi
+
+  implicit none
+
+  integer,dimension(dim3), parameter :: zeros = (/0,0,0/)
+
+  !#define __ZEROSI (/0,0,0/)
+
+contains
+
+  !> Initialisation of the Poisson solver (fft), based on ppm and adapted from
+  !! the equivalent file in ppm
+  subroutine mypoisson_init(topoid,meshid,ppmpoisson,fieldin,fieldout,info)
+!!! * ppm_poisson_grn_pois_per - Poisson equation, periodic boundaries
+!!!
+!!! [NOTE]
+!!! fieldin is not preserved by this routine!
+!!! fieldin and fieldout must NOT be the same fields. In-place FFTs have
+!!! not been implemented.
+!!!
+
+    !> the topology id
+    integer, intent(in) :: topoid
+    !> corresponding mesh id
+    integer, intent(in) :: meshid
+    !> fftw plan interface
+    type(ppm_poisson_plan),intent(out) :: ppmpoisson
+    !> input field (i.e. rhs of poisson eq.)
+    !@ strictly speaking fieldin is not being used in the init routine
+    real(mk), dimension(:,:,:,:,:), pointer :: fieldin
+    !> output field
+    real(mk), dimension(:,:,:,:,:), pointer :: fieldout
+    !> error status
+    integer, intent(out) :: info
+
+    !-------------------------------------------------------------------------
+    ! Local variables
+    !-------------------------------------------------------------------------
+    !  real(mk)                            :: t0
+    real(mk),dimension(:,:),pointer     :: xp=>NULL()      !particle positions
+    TYPE(ppm_t_topo),pointer                :: topology=>NULL()
+    TYPE(ppm_t_equi_mesh)                   :: mesh
+    integer ,dimension(dime)               :: indl,indu
+    real(mk),PARAMETER                  :: PI=ACOS(-1.0_mk) !@ use ppm pi
+    real(mk)                            :: normfac
+!!!factor for the Greens function, including FFT normalization
+    integer                                 :: i,j,k
+    integer                                 :: kx,ky,kz
+    integer                                 :: isubl,isub
+    integer,dimension(dime*2)              :: bcdef
+    integer                                 :: assigning
+    integer                                 :: decomposition
+    integer,SAVE                            :: ttopoid
+    integer                                 :: tmeshid
+    real(mk)                            :: Lx2,Ly2,Lz2
+
+    real(mk),dimension(dime)           :: tmpmin,tmpmax
+    integer, dimension(:),pointer           :: maxndataxy=>NULL(),maxndataz=>NULL()
+    integer, dimension(:  ), pointer        :: dummynmxy=>NULL(),dummynmz=>NULL()
+
+    !-------------------------------------------------------------------------
+    ! Initialise routine
+    !-------------------------------------------------------------------------
+    !  CALL substart('ppm_poisson_init',t0,info)
+
+    ppmpoisson%case  = ppm_poisson_grn_pois_per
+
+    !-------------------------------------------------------------------------
+    ! Nullify pointers from the ppmpoisson plans and the fftplans
+    !-------------------------------------------------------------------------
+    NULLIFY(xp)
+    NULLIFY(ppmpoisson%costxy)
+    NULLIFY(ppmpoisson%istartxy)
+    NULLIFY(ppmpoisson%ndataxy)
+    NULLIFY(ppmpoisson%istartxyc)
+    NULLIFY(ppmpoisson%ndataxyc)
+    NULLIFY(ppmpoisson%costz)
+    NULLIFY(ppmpoisson%istartz)
+    NULLIFY(ppmpoisson%ndataz)
+    NULLIFY(ppmpoisson%planfxy%plan)
+    NULLIFY(ppmpoisson%planbxy%plan)
+    NULLIFY(ppmpoisson%planfz%plan)
+    NULLIFY(ppmpoisson%planbz%plan)
+
+    !-------------------------------------------------------------------------
+    ! Get topology and mesh values of input/output
+    !-------------------------------------------------------------------------
+    call ppm_topo_get(topoid,topology,info)
+    mesh  = topology%mesh(meshid)
+
+    !-------------------------------------------------------------------------
+    ! Setup mesh sizes for intermediate meshes/topologies
+    !-------------------------------------------------------------------------
+    !size of real slabs
+    ppmpoisson%nmxy (1) =  mesh%nm(1)
+    ppmpoisson%nmxy (2) =  mesh%nm(2)
+    ppmpoisson%nmxy (3) =  mesh%nm(3)
+    !size of complex slabs
+    ppmpoisson%nmxyc(1) = (mesh%nm(1)-1)/2+1
+    !!ppmpoisson%nmxyc(1) =  mesh%nm(1)
+    ppmpoisson%nmxyc(2) =  mesh%nm(2)
+    ppmpoisson%nmxyc(3) =  mesh%nm(3)
+    !size of complex pencils
+    ppmpoisson%nmz  (1) = (ppmpoisson%nmxyc(1))
+    ppmpoisson%nmz  (2) = (ppmpoisson%nmxyc(2))
+    ppmpoisson%nmz  (3) = (ppmpoisson%nmxyc(3))
+    !size of the fft
+    ppmpoisson%nmfft(1) =  mesh%nm(1)-1
+    ppmpoisson%nmfft(2) =  mesh%nm(2)-1
+    ppmpoisson%nmfft(3) =  mesh%nm(3)-1
+    !Inverse of the size of the domain squared
+    Lx2 = 1.0_mk/(topology%max_physd(1)-topology%min_physd(1))**2
+    Ly2 = 1.0_mk/(topology%max_physd(2)-topology%min_physd(2))**2
+    Lz2 = 1.0_mk/(topology%max_physd(3)-topology%min_physd(3))**2
+
+    !-------------------------------------------------------------------------
+    ! Create temporary derivation arrays if necessary
+    !-------------------------------------------------------------------------
+    ppmpoisson%derivatives = ppm_poisson_drv_curl_sp
+    
+    !-------------------------------------------------------------------------
+    ! Always create the spectral scaling components, in case some
+    ! reprojection comes up.
+    ! The conditionals should cover more than just the Poisson equation.
+    !-------------------------------------------------------------------------
+    ppmpoisson%normkx = &
+         & 2.0_mk*PI/(topology%max_physd(1)-topology%min_physd(1))
+    ppmpoisson%normky = &
+         & 2.0_mk*PI/(topology%max_physd(2)-topology%min_physd(2))
+    ppmpoisson%normkz = &
+         & 2.0_mk*PI/(topology%max_physd(3)-topology%min_physd(3))
+
+    !-------------------------------------------------------------------------
+    ! Create new slab topology
+    !-------------------------------------------------------------------------
+    ttopoid = 0
+    tmeshid = -1
+    decomposition       = ppm_param_decomp_xy_slab
+    assigning           = ppm_param_assign_internal
+    bcdef               = ppm_param_bcdef_periodic
+    tmpmin              = topology%min_physd
+    tmpmax              = topology%max_physd
+
+    CALL ppm_mktopo(ttopoid,tmeshid,xp,0,&
+         & decomposition,assigning,&
+         & tmpmin,tmpmax,bcdef,&
+         & zeros,ppmpoisson%costxy,&
+         & ppmpoisson%nmxy,info)
+
+    ppmpoisson%topoidxy = ttopoid
+    ppmpoisson%meshidxy = tmeshid
+    !-------------------------------------------------------------------------
+    ! Get additional xy-mesh information
+    !-------------------------------------------------------------------------
+    CALL ppm_topo_get_meshinfo(ppmpoisson%topoidxy,ppmpoisson%meshidxy, &
+         & dummynmxy,ppmpoisson%istartxy,ppmpoisson%ndataxy,maxndataxy, &
+         & ppmpoisson%isublistxy,ppmpoisson%nsublistxy,info)
+
+
+    !-------------------------------------------------------------------------
+    ! Create complex slab mesh
+    !-------------------------------------------------------------------------
+    ttopoid = ppmpoisson%topoidxy
+    tmeshid = -1
+    CALL ppm_mesh_define(ttopoid,tmeshid,&
+         & ppmpoisson%nmxyc,ppmpoisson%istartxyc,ppmpoisson%ndataxyc,info)
+    ppmpoisson%meshidxyc = tmeshid
+
+
+    !-------------------------------------------------------------------------
+    ! Create new pencil topology
+    !-------------------------------------------------------------------------
+    ttopoid = 0
+    tmeshid = -1
+    bcdef               = ppm_param_bcdef_periodic
+    assigning       = ppm_param_assign_internal
+    decomposition   = ppm_param_decomp_zpencil
+
+    CALL ppm_mktopo(ttopoid,tmeshid,xp,0,&
+         & decomposition,assigning,&
+         & tmpmin,tmpmax,bcdef,&
+         & zeros,ppmpoisson%costz,&
+         & ppmpoisson%nmz,info)
+
+    ppmpoisson%topoidz = ttopoid
+    ppmpoisson%meshidz = tmeshid
+    !-------------------------------------------------------------------------
+    ! Get additional z-mesh information
+    !-------------------------------------------------------------------------
+    CALL ppm_topo_get_meshinfo(ppmpoisson%topoidz,ppmpoisson%meshidz, &
+         & dummynmz,ppmpoisson%istartz,ppmpoisson%ndataz,maxndataz, &
+         & ppmpoisson%isublistz,ppmpoisson%nsublistz,info)
+
+    !-------------------------------------------------------------------------
+    ! Set and get minimum and maximum indices
+    !-------------------------------------------------------------------------
+    indl(1) = 1
+    indl(2) = 1
+    indl(3) = 1
+
+    !-------------------------------------------------------------------------
+    ! Allocate real xy slabs
+    !-------------------------------------------------------------------------
+    ALLOCATE(ppmpoisson%fldxyr(dime,&
+         & indl(1):maxndataxy(1),indl(2):maxndataxy(2),indl(3):maxndataxy(3),&
+         & 1:ppmpoisson%nsublistxy),stat=info)
+
+
+    !-------------------------------------------------------------------------
+    ! Set and get minimum and maximum indices of COMPLEX xy slabs
+    !-------------------------------------------------------------------------
+    indl(1) = 1
+    indl(2) = 1
+    indl(3) = 1
+    indu(1) = 0
+    indu(2) = 0
+    indu(3) = 0
+    DO isub=1,ppmpoisson%nsublistxy
+       isubl = ppmpoisson%isublistxy(isub)
+       indu(1) = MAX(indu(1),ppmpoisson%ndataxyc(1,isubl))
+       indu(2) = MAX(indu(2),ppmpoisson%ndataxyc(2,isubl))
+       indu(3) = MAX(indu(3),ppmpoisson%ndataxyc(3,isubl))
+    ENDDO
+
+
+    !-------------------------------------------------------------------------
+    ! Allocate complex xy slabs
+    !-------------------------------------------------------------------------
+    ALLOCATE(ppmpoisson%fldxyc(dime,&
+         & indl(1):indu(1),indl(2):indu(2),indl(3):indu(3),&
+         & 1:ppmpoisson%nsublistxy),stat=info)
+
+
+    !-------------------------------------------------------------------------
+    ! Allocate two complex z pencils + Greens fcn array !@check return vars.
+    !-------------------------------------------------------------------------
+    ALLOCATE(ppmpoisson%fldzc1(dime,&
+         & indl(1):maxndataz(1),indl(2):maxndataz(2),indl(3):maxndataz(3),&
+         & 1:ppmpoisson%nsublistz),stat=info)
+
+    ALLOCATE(ppmpoisson%fldzc2(dime,&
+         & indl(1):maxndataz(1),indl(2):maxndataz(2),indl(3):maxndataz(3),&
+         & 1:ppmpoisson%nsublistz),stat=info)
+
+
+    !-------------------------------------------------------------------------
+    ! The complex Greens function is always kept on the z-pencil topology
+    !-------------------------------------------------------------------------
+    ALLOCATE(ppmpoisson%fldgrnr(&
+         & indl(1):maxndataz(1),indl(2):maxndataz(2),indl(3):maxndataz(3),&
+         & 1:ppmpoisson%nsublistz),stat=info)
+
+    !-------------------------------------------------------------------------
+    ! Set up xy FFT plans
+    ! The inverse plan takes the returning topology since it has the full size
+    !-------------------------------------------------------------------------
+    CALL ppm_fft_forward_2d(ppmpoisson%topoidxy,ppmpoisson%meshidxy,&
+         & ppmpoisson%planfxy,ppmpoisson%fldxyr,&
+         & ppmpoisson%fldxyc,info)
+
+    CALL ppm_fft_backward_2d(ppmpoisson%topoidxy,ppmpoisson%meshidxy,&
+         & ppmpoisson%planbxy,ppmpoisson%fldxyc,&
+         & ppmpoisson%fldxyr,info)
+
+
+    !-------------------------------------------------------------------------
+    ! Set up z FFT plans
+    !-------------------------------------------------------------------------
+    CALL ppm_fft_forward_1d(ppmpoisson%topoidz,ppmpoisson%meshidz,&
+         & ppmpoisson%planfz,ppmpoisson%fldzc1,&
+         & ppmpoisson%fldzc2,info)
+
+    CALL ppm_fft_backward_1d(ppmpoisson%topoidz,ppmpoisson%meshidz,&
+         & ppmpoisson%planbz,ppmpoisson%fldzc2,&
+         & ppmpoisson%fldzc1,info)
+
+
+    !-------------------------------------------------------------------------
+    ! Compute Greens function. Analytic, periodic
+    !
+    ! (d2_/dx2 + d2_/dy2 + d2_/dz2)psi = -omega     =>
+    ! -4*pi2(kx2 + ky2 + kz2)PSI       = -OMEGA     =>
+    ! PSI                              = 1/(4*pi2)*1/(kx2 + ky2 + kz2)OMEGA
+    !-------------------------------------------------------------------------
+    ! Scaling of the spectral coefficients:
+    ! one minus sign comes from (i*k)^2 and another from the Poisson equation
+    normfac = 1.0_mk/(4.0_mk*PI*PI * &
+                                !and normalisation of FFTs (full domain) !vertex
+         & real((ppmpoisson%nmfft(1))* &
+         &      (ppmpoisson%nmfft(2))* &
+         &      (ppmpoisson%nmfft(3)),mk))
+    DO isub=1,ppmpoisson%nsublistz
+       isubl=ppmpoisson%isublistz(isub)
+       DO k=1,ppmpoisson%ndataz(3,isubl)
+          DO j=1,ppmpoisson%ndataz(2,isubl)
+             DO i=1,ppmpoisson%ndataz(1,isubl)
+                kx = i-1 + (ppmpoisson%istartz(1,isubl)-1)
+                ky = j-1 + (ppmpoisson%istartz(2,isubl)-1)
+                kz = k-1 + (ppmpoisson%istartz(3,isubl)-1)
+                !This is a nasty way to do this but it is only done once so...:
+                IF (kx .GT. (ppmpoisson%nmfft(1)/2)) kx = kx-(ppmpoisson%nmfft(1))
+                IF (ky .GT. (ppmpoisson%nmfft(2)/2)) ky = ky-(ppmpoisson%nmfft(2))
+                IF (kz .GT. (ppmpoisson%nmfft(3)/2)) kz = kz-(ppmpoisson%nmfft(3))
+                ppmpoisson%fldgrnr(i,j,k,isub) = &
+                     & normfac/(real(kx*kx,mk)*Lx2 &
+                     &        + real(ky*ky,mk)*Ly2 &
+                     &        + real(kz*kz,mk)*Lz2)
+                !Take care of singularity
+                !This is nasty as well
+                IF ((kx*kx+ky*ky+kz*kz) .EQ. 0) THEN
+                   ppmpoisson%fldgrnr(i,j,k,isub) = 0.0_mk
+                ENDIF
+             ENDDO
+          ENDDO
+       ENDDO
+    ENDDO
+  end subroutine mypoisson_init
+
+  subroutine mypoisson_solve(topoid,meshid,ppmpoisson,fieldin,fieldout,gstw,info)
+
+
+    INTEGER, INTENT(IN)                                         :: topoid
+!!! Topology ID
+    INTEGER, INTENT(IN)                                         :: meshid
+!!! Mesh ID
+    TYPE(ppm_poisson_plan),INTENT(INOUT)                        :: ppmpoisson
+!!! The PPM Poisson plan
+    REAL(mk),DIMENSION(:,:,:,:,:),POINTER                   :: fieldin
+!!! Input data field
+    REAL(mk),DIMENSION(:,:,:,:,:),POINTER                   :: fieldout
+!!! Output data field
+    INTEGER,DIMENSION(dime),INTENT(IN)                         :: gstw
+!!! Ghost layer width
+    INTEGER, INTENT(OUT)                                        :: info
+!!! Return status, 0 upon success
+
+    !-------------------------------------------------------------------------
+    ! Local variables
+    !-------------------------------------------------------------------------
+
+    REAL(mk)                      :: t0
+    INTEGER                           :: isub,isubl
+    INTEGER                           :: i,j,k
+    INTEGER                           :: info2
+    INTEGER                           :: presentcase
+    COMPLEX(mk)                   :: divomega
+    INTEGER                           :: gi,gj,gk
+    COMPLEX(mk)                   :: kx,ky,kz
+    COMPLEX(mk)                   :: phix,phiy,phiz
+    REAL(mk)                      :: normfac
+
+    !-------------------------------------------------------------------------
+    ! Check if we run a different/temporary case
+    !-------------------------------------------------------------------------
+
+    presentcase = ppmpoisson%case
+
+    !-----------------------------------------------------------------------
+    ! Map data globally to the slabs (XY)
+    ! This is where the vorticity is extended and padded with 0 for free-space
+    !-----------------------------------------------------------------------
+    !Initialise
+    CALL ppm_map_field_global(topoid,ppmpoisson%topoidxy,meshid,ppmpoisson%meshidxy,info)
+    !Push the data
+    CALL ppm_map_field_push(topoid,meshid,fieldin,3,info)
+    CALL ppm_map_field_send(info)
+    !Retrieve
+    CALL ppm_map_field_pop(ppmpoisson%topoidxy,ppmpoisson%meshidxy,ppmpoisson%fldxyr,3,zeros,info)
+
+    !-----------------------------------------------------------------------
+    ! Do slab FFT (XY) - use the xy topology as its extent has not been halved
+    !-----------------------------------------------------------------------
+    CALL ppm_fft_execute_2d(ppmpoisson%topoidxy,&
+         & ppmpoisson%meshidxy, ppmpoisson%planfxy, &
+         & ppmpoisson%fldxyr, ppmpoisson%fldxyc, &
+         & info)
+
+    !-----------------------------------------------------------------------
+    ! Map to the pencils (Z)
+    !-----------------------------------------------------------------------
+    !Initialise
+    CALL ppm_map_field_global(&
+         & ppmpoisson%topoidxy, &
+         & ppmpoisson%topoidz, &
+         & ppmpoisson%meshidxyc, &
+         & ppmpoisson%meshidz,info)
+
+    !Push the data
+    CALL ppm_map_field_push(&
+         & ppmpoisson%topoidxy, &
+         & ppmpoisson%meshidxyc,ppmpoisson%fldxyc,3,info)
+    !Send
+    CALL ppm_map_field_send(info)
+
+    !Retrieve
+    CALL ppm_map_field_pop(&
+         & ppmpoisson%topoidz, &
+         & ppmpoisson%meshidz,ppmpoisson%fldzc1, &
+         & 3,zeros,info)
+
+    !-----------------------------------------------------------------------
+    ! Do pencil FFT (Z)
+    !-----------------------------------------------------------------------
+    CALL ppm_fft_execute_1d(ppmpoisson%topoidz,&
+         & ppmpoisson%meshidz, ppmpoisson%planfz, &
+         & ppmpoisson%fldzc1, ppmpoisson%fldzc2, &
+         & info)
+
+
+    !-----------------------------------------------------------------------
+    ! Apply the periodic Greens function
+    !-----------------------------------------------------------------------
+    DO isub=1,ppmpoisson%nsublistz
+       isubl=ppmpoisson%isublistz(isub)
+       DO k=1,ppmpoisson%ndataz(3,isubl)
+          DO j=1,ppmpoisson%ndataz(2,isubl)
+             DO i=1,ppmpoisson%ndataz(1,isubl)
+                ppmpoisson%fldzc2(1,i,j,k,isub) = ppmpoisson%fldgrnr( i,j,k,isub)*&
+                     & ppmpoisson%fldzc2(1,i,j,k,isub)
+                ppmpoisson%fldzc2(2,i,j,k,isub) = ppmpoisson%fldgrnr( i,j,k,isub)*&
+                     & ppmpoisson%fldzc2(2,i,j,k,isub)
+                ppmpoisson%fldzc2(3,i,j,k,isub) = ppmpoisson%fldgrnr( i,j,k,isub)*&
+                     & ppmpoisson%fldzc2(3,i,j,k,isub)
+             ENDDO
+          ENDDO
+       ENDDO
+    ENDDO
+
+
+    !-----------------------------------------------------------------------
+    ! Spectral derivatives
+    ! normkx, etc contains 2pi/Lx
+    !-----------------------------------------------------------------------
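+    ! The loop below computes k x phi with purely imaginary k = (kx,ky,kz):
+    ! the spectral curl that turns the stream function into the velocity.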
+    normfac = 1.0_MK/ REAL((ppmpoisson%nmfft(1))* & !vertex
+         & (ppmpoisson%nmfft(2))* &
+         & (ppmpoisson%nmfft(3)),MK)
+    DO isub=1,ppmpoisson%nsublistz
+       isubl=ppmpoisson%isublistz(isub)
+       DO k=1,ppmpoisson%ndataz(3,isubl)
+          gk = k - 1 + (ppmpoisson%istartz(3,isubl)-1)
+          IF (gk .GT. (ppmpoisson%nmfft(3)/2)) gk = gk-(ppmpoisson%nmfft(3))
+          kz = CMPLX(0.0_MK,REAL(gk,MK),MK)*ppmpoisson%normkz
+          DO j=1,ppmpoisson%ndataz(2,isubl)
+             gj = j - 1 + (ppmpoisson%istartz(2,isubl)-1)
+             IF (gj .GT. (ppmpoisson%nmfft(2)/2)) gj = gj-(ppmpoisson%nmfft(2))
+             ky = CMPLX(0.0_MK,REAL(gj,MK),MK)*ppmpoisson%normky
+             DO i=1,ppmpoisson%ndataz(1,isubl)
+                gi = i - 1 + (ppmpoisson%istartz(1,isubl)-1)
+                IF (gi .GT. (ppmpoisson%nmfft(1)/2)) gi = gi-(ppmpoisson%nmfft(1))
+                kx = CMPLX(0.0_MK,REAL(gi,MK),MK)*ppmpoisson%normkx
+                
+                phix = ppmpoisson%fldzc2(1,i,j,k,isub)
+                phiy = ppmpoisson%fldzc2(2,i,j,k,isub)
+                phiz = ppmpoisson%fldzc2(3,i,j,k,isub)
+                
+                ppmpoisson%fldzc2(1,i,j,k,isub) = (ky*phiz-kz*phiy)
+                ppmpoisson%fldzc2(2,i,j,k,isub) = (kz*phix-kx*phiz)
+                ppmpoisson%fldzc2(3,i,j,k,isub) = (kx*phiy-ky*phix)
+             ENDDO
+          ENDDO
+       ENDDO
+    ENDDO
+
+    !-----------------------------------------------------------------------
+    ! IFFT pencil (Z)
+    !-----------------------------------------------------------------------
+    CALL ppm_fft_execute_1d(ppmpoisson%topoidz,&
+         & ppmpoisson%meshidz, ppmpoisson%planbz, &
+         & ppmpoisson%fldzc2, ppmpoisson%fldzc1, &
+         & info)
+
+    !-----------------------------------------------------------------------
+    ! Map back to slabs (XY)
+    !-----------------------------------------------------------------------
+    !Initialise
+    CALL ppm_map_field_global(&
+         & ppmpoisson%topoidz, &
+         & ppmpoisson%topoidxy, &
+         & ppmpoisson%meshidz, &
+         & ppmpoisson%meshidxyc,info)
+    !Push the data
+    CALL ppm_map_field_push(&
+         & ppmpoisson%topoidz, &
+         & ppmpoisson%meshidz,ppmpoisson%fldzc1,3,info)
+
+    !Send
+    CALL ppm_map_field_send(info)
+
+    !Retrieve
+    CALL ppm_map_field_pop(&
+         & ppmpoisson%topoidxy, &
+         & ppmpoisson%meshidxyc,ppmpoisson%fldxyc, &
+         & 3,zeros,info)
+
+    !-----------------------------------------------------------------------
+    ! IFFT (XY) - use the non-reduced topology
+    !-----------------------------------------------------------------------
+    CALL ppm_fft_execute_2d(ppmpoisson%topoidxy,&
+         & ppmpoisson%meshidxy, ppmpoisson%planbxy, &
+         & ppmpoisson%fldxyc, ppmpoisson%fldxyr, &
+         & info)
+
+
+    !-----------------------------------------------------------------------
+    ! Map back to standard topology (XYZ)
+    !-----------------------------------------------------------------------
+    !Initialise
+    CALL ppm_map_field_global(&
+         & ppmpoisson%topoidxy, &
+         & topoid, &
+         & ppmpoisson%meshidxy, &
+         & meshid,info)
+    !Push the data
+    CALL ppm_map_field_push(&
+         & ppmpoisson%topoidxy, &
+         & ppmpoisson%meshidxy,ppmpoisson%fldxyr,3,info)
+
+    !Send
+    CALL ppm_map_field_send(info)
+
+    !-------------------------------------------------------------------------
+    ! FINAL RETRIEVE - Here we do different things depending on the task
+    ! i.e. the receiver varies
+    !-------------------------------------------------------------------------
+    IF ((ppmpoisson%derivatives .EQ. ppm_poisson_drv_curl_fd2 .OR. &
+         &  ppmpoisson%derivatives .EQ. ppm_poisson_drv_curl_fd4) ) THEN
+       CALL ppm_map_field_pop(&
+            & topoid, &
+            & meshid,ppmpoisson%drv_vr, &
+            & 3,gstw,info)
+       !-------------------------------------------------------------------------
+       ! Ghost the temporary array for derivatives (drv_vr)
+       !-------------------------------------------------------------------------
+       CALL ppm_map_field_ghost_get(topoid,meshid,gstw,info)
+       CALL ppm_map_field_push(topoid,meshid,ppmpoisson%drv_vr,3,info)
+       CALL ppm_map_field_send(info)
+       CALL ppm_map_field_pop(topoid,meshid,ppmpoisson%drv_vr,3,gstw,info)
+
+    ELSE
+       CALL ppm_map_field_pop(&
+            & topoid, &
+            & meshid,fieldout, &
+            & 3,gstw,info)
+    ENDIF
+
+    !-------------------------------------------------------------------------
+    ! Treat ghost layer to make FD stencils work
+    !-------------------------------------------------------------------------
+    IF (ppmpoisson%derivatives .EQ. ppm_poisson_drv_curl_fd2) THEN
+       CALL ppm_poisson_extrapolateghost(topoid,meshid,ppmpoisson%drv_vr,&
+            & 2,4,gstw,info)
+    ENDIF
+    IF (ppmpoisson%derivatives .EQ. ppm_poisson_drv_curl_fd4 .AND.&
+         & (presentcase .EQ. ppm_poisson_grn_pois_fre)) THEN
+       CALL ppm_poisson_extrapolateghost(topoid,meshid,ppmpoisson%drv_vr,&
+            & 2,4,gstw,info)
+    ENDIF
+
+    !-------------------------------------------------------------------------
+    ! Optionally do derivatives
+    ! Perhaps make ppm_poisson_fd take _none as argument. Then maybe no
+    ! if-statement is required
+    !-------------------------------------------------------------------------
+    IF (presentcase .NE. ppm_poisson_grn_reprojec) THEN
+       IF (ppmpoisson%derivatives .EQ. ppm_poisson_drv_curl_fd2) THEN
+          CALL ppm_poisson_fd(topoid,meshid,ppmpoisson%drv_vr,fieldout,&
+               & ppm_poisson_drv_curl_fd2,info)
+       ENDIF
+       IF (ppmpoisson%derivatives .EQ. ppm_poisson_drv_curl_fd4) THEN
+          CALL ppm_poisson_fd(topoid,meshid,ppmpoisson%drv_vr,fieldout,&
+               & ppm_poisson_drv_curl_fd4,info)
+       ENDIF
+    ENDIF
+
+    !-------------------------------------------------------------------------
+    ! Finally ghost the velocity/stream function field before returning it
+    ! Also extrapolate if freespace
+    !-------------------------------------------------------------------------
+    CALL ppm_map_field_ghost_get(topoid,meshid,gstw,info)
+    CALL ppm_map_field_push(topoid,meshid,fieldout,3,info)
+    CALL ppm_map_field_send(info)
+    CALL ppm_map_field_pop(topoid,meshid,fieldout,3,gstw,info)
+
+  end subroutine mypoisson_solve
+
+
+end module ppmljk_poisson
diff --git a/HySoP/src/precision.conf.in b/HySoP/src/precision.conf.in
new file mode 100644
index 0000000000000000000000000000000000000000..8583d1d9a82f3197ba9547948befa3a63832d05c
--- /dev/null
+++ b/HySoP/src/precision.conf.in
@@ -0,0 +1,19 @@
+!> Select float precision for the whole code.
+!! This is a generated file, do not edit.
+!! Usage :
+!! cmake -DPRECISION=value ...
+!! with value = simple or value = double
+module precision
+
+  use mpi, only: MPI_DOUBLE_PRECISION, MPI_FLOAT
+  implicit none
+  !> Floats precision
+  integer, parameter  :: SP = kind(1.0)
+  integer, parameter  :: DP = kind(1.0d0)
+  !> Chosen precision (set during config. using -DPRECISION=... with cmake)
+  integer, parameter  :: WP = @PRECISION@
+  !> MPI type for floats
+  integer, parameter  :: MPI_REAL_WP = @MPI_PRECISION@
+  !> Length of short strings
+  integer, parameter  :: str_short  = 8
+end module precision
diff --git a/HySoP/src/scalesReduced/layout/cart_mesh_tools.f90 b/HySoP/src/scalesReduced/layout/cart_mesh_tools.f90
new file mode 100644
index 0000000000000000000000000000000000000000..a60573898e1c2298b7de73f6833121a96036b5ce
--- /dev/null
+++ b/HySoP/src/scalesReduced/layout/cart_mesh_tools.f90
@@ -0,0 +1,104 @@
+!USEFORTEST toolbox
+!USEFORTEST postprocess
+!USEFORTEST advec
+!USEFORTEST interpolation
+!USEFORTEST io
+!USEFORTEST topo
+!USEFORTEST avgcond
+!> @addtogroup toolbox
+!! @{
+
+!-----------------------------------------------------------------------------
+!
+! MODULE: cart_mesh_tools
+!
+!
+! DESCRIPTION:
+!>  This module provides a mesh structure. It is used for output and as a
+!! future base to deal with different scalar fields computed with possibly
+!! different resolutions.
+!
+!> @details
+!!  This module provides a structure to save the mesh context associated to a
+!! field. This allows working easily with different resolutions and knowing
+!! how the mesh interacts with the mpi topology.
+!! It provides the different tools to initialise the type to some default
+!! values or to auto-complete it.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module cart_mesh_tools
+
+    use precision_tools
+
+    implicit none
+
+    public
+
+    ! ===== Type =====
+    !> Information about the mesh subdivision and the global grid
+    type cartesian_mesh
+        !> number of grid points in each direction
+        integer, dimension(3)   :: N
+        !> number of grid points of the local subgrid in each direction
+        integer, dimension(3)   :: N_proc
+        !> min and max local indices, in each direction, on the current process
+        integer, dimension(3,2) :: relative_extend
+        !> min and max global indices, in each direction, associated to the current process
+        integer, dimension(3,2) :: absolute_extend
+        !> space step for field discretisation
+        real(WP), dimension(3)  :: dx
+        !> Physical size of the domain
+        real(WP), dimension(3)  :: length
+    end type cartesian_mesh
+
+
+    ! ===== Public procedures =====
+    ! Auto-complete cartesian_mesh data field.
+    public      :: mesh_save
+
+
+contains
+
+!> Auto-complete some fields of a "cartesian_mesh" variable.
+!>    @param[out]   mesh    = variable of type cartesian_mesh where the mesh data are saved
+!>    @param[in]    Nb      = number of grid points along each direction
+!>    @param[in]    Nb_proc = number of grid points along each direction on the current process
+!>    @param[in]    d_space = space step
+!>    @param[in]    coord   = coordinates of the current process in the 3D mpi-topology
+subroutine mesh_save(mesh, Nb, Nb_proc, d_space, coord)
+
+    implicit none
+
+    ! Input/Output
+    type(cartesian_mesh), intent(out)       :: mesh
+    integer, dimension(3), intent(in)       :: Nb
+    integer, dimension(3), intent(in)       :: Nb_proc
+    integer, dimension(3), intent(in)       :: coord
+    real(WP), dimension(3), intent(in)      :: d_space
+    ! Other local variables
+    integer                                 :: direction    ! integer corresponding to a direction (X, Y or Z)
+
+    ! Number of mesh points
+    mesh%N = Nb
+    mesh%N_proc = Nb_proc
+
+    ! Relative extend
+    mesh%relative_extend(:,1) = 1
+    mesh%relative_extend(:,2) = Nb_proc
+    ! Absolute one
+    do direction = 1, 3
+        mesh%absolute_extend(direction,:) = coord(direction)*Nb_proc(direction) + mesh%relative_extend(direction,:)
+    end do
+
+    ! Space step
+    mesh%dx = d_space
+
+end subroutine mesh_save
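+
+! Illustrative sketch (hypothetical values): for a 64^3 global grid split over
+! 4 processes along Z, each process would call
+!   call mesh_save(mesh, (/64,64,64/), (/64,64,16/), (/dx,dy,dz/), coord)
+! so that absolute_extend(3,:) covers coord(3)*16 + (1..16).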
+
+
+end module cart_mesh_tools
+!> @}
diff --git a/HySoP/src/scalesReduced/layout/cart_topology.f90 b/HySoP/src/scalesReduced/layout/cart_topology.f90
new file mode 100644
index 0000000000000000000000000000000000000000..c499641ef60cc55232150d84f2d9e3bea4478e52
--- /dev/null
+++ b/HySoP/src/scalesReduced/layout/cart_topology.f90
@@ -0,0 +1,787 @@
+!USEFORTEST toolbox
+!USEFORTEST avgcond
+!USEFORTEST postprocess
+!USEFORTEST advec
+!USEFORTEST interpolation
+!USEFORTEST io
+!USEFORTEST topo
+!> @addtogroup cart_structure
+!! @{
+
+!------------------------------------------------------------------------------
+
+!
+! MODULE: cart_topology
+!
+!
+! DESCRIPTION:
+!>  This module provides a cartesian topology on the parallel layout.
+!
+!> @details
+!!  This module provides a cartesian topology on the parallel layout.
+!! This virtual topology is created by the MPI procedures (and thus uses
+!! low-level optimisations based on the underlying hardware). It
+!! provides the different tools to create and manipulate it and to
+!! interface it with the other topologies and communicators.
+!! The solver uses some dimensional splitting and this module contains all the
+!! methods used to solve advection along the Y-axis. This is a parallel
+!! implementation using MPI and the cartesian topology it provides.
+!!
+!!  For now, the domain is only split along the Y and Z axes. Therefore,
+!! we only use a 2D cartesian topology.
+!!  A "global" communicator is devoted to the (2D) cartesian structure.
+!! Another communicator is added for each direction in order to deal
+!! with all 1D communications (along Y or Z).
+!! Be careful: the (Y,Z)-axes of the 3D mesh map to the (X,Y) axes of the 2D
+!! mpi-topology.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module cart_topology
+
+    use precision_tools
+    use cart_mesh_tools
+    use mpi, only: MPI_TAG_UB
+
+    implicit none
+
+    ! ===== Structure =====
+    ! ----- Structure to save work item information -----
+    ! This allows using different resolutions more easily.
+    type group_info
+      !> Computations are done by groups of lines. Here we define their size
+      integer, dimension(3,2)             :: group_size
+      !> To check if group size is initialized
+      logical                             :: group_init = .false.
+      !> To concatenate position in order to create unique mpi message tag
+      integer, dimension(3,2)             :: tag_size
+      !> To concatenate rank in order to create unique mpi message tag
+      integer                             :: tag_rank
+      !> To check if parameter is already initialized
+      logical                             :: mesh_init = .false.
+    end type group_info
+
+
+    ! ===== Public variables =====
+
+    ! ----- Communicators -----
+    !> Main communicator, duplicated from the parent communicator
+    integer, protected                  :: main_comm
+    !> Communicator associated with the cartesian topology
+    integer, protected                  :: cart_comm
+    !> Communicators devoted to 1-dimensional subgrids (along Y and Z)
+    integer, protected                  :: X_comm, Y_comm, Z_comm
+    !> Array of the previous communicators (ie comms devoted to 1D subgrids)
+    integer, dimension(3), protected    :: D_comm
+    !> Ranks of the neighbors (up to distance 4) on the 1D topologies
+    integer,dimension(3,-4:4),protected :: neighbors
+    !> Ranks of the immediate neighbors on the cartesian topology
+    integer,dimension(1:3,-1:1),protected :: neighbors_cart_topo=0
+
+    ! ----- Information about the current MPI process and the MPI topology
+    !> number of processes in each direction
+    integer, dimension(3), protected    :: nb_proc_dim
+    !> rank of the current process (in the cartesian communicator)
+    integer, public                     :: cart_rank
+    !> rank of the current process (in the communicators associated to the different directions)
+    integer, dimension(3), public       :: D_rank
+    !> coordinates of the current process
+    integer, dimension(3), protected    :: coord
+    !> YZ coordinates of the current process
+    integer, dimension(2), protected    :: coordYZ
+    !> Periodic boundary conditions: logical array, equals true if periodic
+    logical, dimension(3),protected     :: periods
+
+    ! ------ Information about mesh subdivision and on the global grid -----
+    !> information about the local mesh - for the scalar
+    type(cartesian_mesh), protected     :: mesh_sc
+    !> Copy of mesh_sc%N_proc for the python interface
+    integer, dimension(3)               :: N_proc
+    !> Computations are done by groups of lines. Here we define their size
+    integer, dimension(3,2), protected  :: group_size
+    !> To check if group size is initialized
+    logical, private                    :: group_init = .false.
+    !> To concatenate position in order to create unique mpi message tag
+    integer, dimension(3,2), private    :: tag_size
+    !> To concatenate rank in order to create unique mpi message tag
+    integer, private                    :: tag_rank
+    !> To check if mesh is already initialized
+    logical, private                    :: mesh_init = .false.
+    !> Default mesh resolution
+    integer, parameter                  :: default_size = 80
+    !> information about local mesh - for velocity
+    type(cartesian_mesh), protected     :: mesh_V
+    !> To check if mesh is already initialized
+    logical, private                    :: mesh_velo_init = .false.
+
+
+    ! ==== Public procedures ====
+    ! Creation of the cartesian topology
+    public      :: cart_create
+    ! Initialise mesh information (first part)
+    public      :: discretisation_create
+    public      :: discretisation_default
+    ! Compute tag for mpi message
+    public      :: compute_tag
+    private     :: compute_tag_gap
+    private     :: compute_tag_NP
+    ! Adjust some private variables
+    public      :: set_group_size
+    private     :: set_group_size_1
+    private     :: set_group_size_1x2
+    private     :: set_group_size_3
+    private     :: set_group_size_init
+    ! Create a cartesian_mesh variable related to the data saved in the cart_topology module.
+    public      :: mesh_save_default
+
+    ! ==== Private procedures ====
+    ! Initialise mesh information (second part)
+    private     :: discretisation_init
+
+    interface compute_tag
+        module procedure compute_tag_gap, compute_tag_NP
+    end interface compute_tag
+
+    interface set_group_size
+    !>    The size of the groups of lines is used to gather lines together in the
+    !! particle solver. As it is a crucial parameter, it must be possible for the
+    !! user to change it without breaking everything (ie if the user value is bad
+    !! it has to be ignored) and it has to be set up by default to a "right and
+    !! intelligent" value. Use set_group_size for this purpose. An optional logical
+    !! argument "init" is used to set group_size to a default "acceptable" value
+    !! if the user value is not acceptable considering the mesh size (ie would create bugs).
+        module procedure set_group_size_1, set_group_size_1x2, set_group_size_3, &
+            & set_group_size_init
+    end interface set_group_size
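+
+    ! Illustrative call sketch: gather lines in 5x5 groups, asking for the
+    ! default fallback if this value does not divide the local mesh size:
+    !   call set_group_size(5, init=.true.)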
+
+contains
+
+!> Creation of the cartesian mpi topology and (if needed) of all communicators
+!! used for the particle method.
+!!    @param[in]    dims        = array specifying the number of processes in each dimension
+!!    @param[out]   ierr        = error code
+!!    @param[in]    parent_comm = parent communicator (duplicated into the main communicator)
+!!    @param[out]   spec_comm   = mpi communicator used by the spectral part of the code (optional).
+!!    @param[in]    topology    = to choose the dimension of the mpi topology (if 0 then none) (optional).
+!! @details
+!!        This subroutine initializes the mpi topology and returns the communicator
+!!    that will be used for all the spectral part of the code (ie everything except
+!!    the particle part). If needed, it also initializes all the mpi context
+!!    used by the particle solver.
+subroutine cart_create(dims, ierr, parent_comm, spec_comm, topology)
+
+    ! Input/Output
+    integer, dimension(:), intent(in)   :: dims
+    integer, intent(out)                :: ierr
+    integer, intent(in)                 :: parent_comm
+    integer, optional, intent(out)      :: spec_comm
+    integer, optional, intent(in)       :: topology
+    ! Other local variables
+    logical                 :: reorganisation                   ! whether MPI may reorder the process ranks.
+    logical, dimension(3)   :: remains_dim                      ! used to create the 1D subdivisions: remains_dim(i) equals
+                                                                ! true if the i-th dimension is kept in the subgrid.
+    integer                 :: direction                        ! current direction: 1 = along X, 2 = along Y and 3 = along Z
+    integer                 :: topology_dim=3                   ! copy of the optional input "topology".
+    integer                 :: key                              ! to re-order processes in spec_comm
+    integer, dimension(1)   :: nb_proc                          ! total number of processes
+    logical, dimension(1)   :: period_1D = .false.              ! periodicity in case of a 1D mpi topology.
+
+    ! Duplicate parent_comm
+    call mpi_comm_dup(parent_comm, main_comm, ierr)
+
+    ! If there are scalars to advect with the particle method, then initialize
+    ! the 2D mpi topology
+    if (present(topology))  then
+        select case (topology)
+            case(0)
+                topology_dim = 0
+            case(1)
+                topology_dim = 1
+            case default
+                topology_dim = 3
+        end select
+    end if
+
+    select case (topology_dim)
+    case (3)
+        ! ===== Create a 2D mpi topology =====
+        ! 2D topology is created and mpi context is initialized for both
+        ! spectral and particles code
+
+        ! --- Creation of the cartesian topology ---
+        reorganisation = .true.
+        periods = .true.
+        if (size(dims)==2) then
+            nb_proc_dim = (/ 1, dims(1), dims(2) /)
+        else if (size(dims)==3) then
+            nb_proc_dim = dims
+            if (nb_proc_dim(1)/=1) then
+                call mpi_comm_rank(main_comm, cart_rank, ierr)
+                if (cart_rank==0) write(*,'(a)') ' XXXXXXXXXX Warning: subdivision along X XXXXXXXXXX'
+            end if
+        else
+            call mpi_comm_rank(main_comm, cart_rank, ierr)
+            if (cart_rank==0) then
+                write(*,'(a)') ' XXXXXXXXXX Error - wrong nb of processes XXXXXXXXXX'
+                write(*,'(a,10(x,i0))') ' input argument dims =', dims
+            end if
+            stop
+        end if
+
+        call mpi_cart_create(main_comm, 3, nb_proc_dim, periods, reorganisation, &
+                & cart_comm, ierr)
+
+        ! --- Create 1D communicator ---
+        ! Subdivision in 1D-subgrids and creation of communicator devoted to
+        ! 1D-communication
+        ! Communication along X-axis
+        remains_dim = (/.true., .false., .false. /)
+        call mpi_cart_sub(cart_comm, remains_dim, X_comm, ierr)
+        D_comm(1) = X_comm
+        ! Communication along Y-axis (in the 3D mesh, ie the x-axis on the mpi-topology)
+        remains_dim = (/.false., .true., .false. /)
+        call mpi_cart_sub(cart_comm, remains_dim, Y_comm, ierr)
+        D_comm(2) = Y_comm
+        ! Communication along Z-axis
+        remains_dim = (/ .false., .false., .true. /)
+        call mpi_cart_sub(cart_comm, remains_dim, Z_comm, ierr)
+        D_comm(3) = Z_comm
+
+        ! --- Initialise information about the current process ---
+        call mpi_comm_rank(cart_comm, cart_rank, ierr)
+        do direction = 1, 3
+            !neighbors on 1D topology
+            call mpi_comm_rank(D_comm(direction), D_rank(direction), ierr)
+            call mpi_cart_shift(D_comm(direction), 0, 1, neighbors(direction,-1), neighbors(direction,1), ierr)
+            call mpi_cart_shift(D_comm(direction), 0, 2, neighbors(direction,-2), neighbors(direction,2), ierr)
+            call mpi_cart_shift(D_comm(direction), 0, 3, neighbors(direction,-3), neighbors(direction,3), ierr)
+            call mpi_cart_shift(D_comm(direction), 0, 4, neighbors(direction,-4), neighbors(direction,4), ierr)
+            neighbors(direction,0) = D_rank(direction)
+            !neighbors on 3D topology
+            call mpi_cart_shift(cart_comm , direction-1, 1, neighbors_cart_topo(direction,-1), neighbors_cart_topo(direction,1), ierr)
+            neighbors_cart_topo(direction,0) = cart_rank
+        end do
+        call mpi_cart_coords(cart_comm, cart_rank, 3, coord, ierr)
+        coordYZ = (/ coord(2), coord(3) /)
+        ! --- Spectral context ---
+        ! Initialise the communicator on which the spectral part
+        ! will be based.
+        if (present(spec_comm)) then
+            !> The rank numbering in the spectral communicator grows along the
+            !! first direction first and then along the second, the opposite of
+            !! the mpi rank numbering. That is why the processes are reordered
+            !! and a second communicator is created.
+            !! Example with 4 processes
+            !! coord    // mpi-cart rank    // spec rank
+            !! (0,0,0)  // 0                // 0
+            !! (0,1,0)  // 2                // 1
+            !! (0,0,1)  // 1                // 2
+            !! (0,1,1)  // 3                // 3
+            ! Construct the key used to reorder
+            key = coord(1) + (coord(2) + coord(3)*nb_proc_dim(2))*nb_proc_dim(1)
+            ! As there is no split along X, this is equivalent to "key = coord(2) + coord(3)*nb_proc_dim(2)"
+            ! Construct spectral communicator
+            call mpi_comm_split(cart_comm, 1, key, spec_comm, ierr)
+        end if
+
+    case (1)
+            ! Construct a 1D non-periodic mpi topology
+            reorganisation = .true.
+            nb_proc = product(dims)
+            call mpi_cart_create(main_comm, 1, nb_proc, period_1D, reorganisation, &
+                & cart_comm, ierr)
+            call mpi_comm_rank(cart_comm, cart_rank, ierr)
+            ! Use it as the spectral communicator.
+            if (present(spec_comm)) spec_comm = cart_comm
+
+    case default
+        ! ===== Do not use mpi topology =====
+        if (present(spec_comm)) then
+            spec_comm = main_comm
+        end if
+        call mpi_comm_rank(main_comm,cart_rank,ierr)
+    end select
+
+
+    ! Print some minimal information about the topology
+    if (cart_rank == 0) then
+        write(*,'(a)') ''
+        write(*,'(6x,a)') '========== Topology used ========='
+        if (topology_dim == 0) then
+            write(*,'(6x,a)') 'No mpi topology'
+        else
+            write(*,'(6x,i0,a)') topology_dim,'D mpi topology'
+        end if
+        write(*,'(6x,a,i0,x,i0,x,i0)') 'nb of proc along X, Y, Z = ', nb_proc_dim
+        write(*,'(6x,a)') '=================================='
+        write(*,'(a)') ''
+    end if
+
+end subroutine cart_create
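+
+! Illustrative sketch: create the topology on 4 processes, split 2x2 over (Y,Z),
+! and recover a communicator for the spectral code (error checking omitted):
+!   integer :: ierr, spec
+!   call cart_create((/2,2/), ierr, MPI_COMM_WORLD, spec_comm=spec)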
+
+!> Create the mesh structure associated to the topology
+!!    @param[in]    Nx          = number of mesh points along X
+!!    @param[in]    Ny          = number of mesh points along Y
+!!    @param[in]    Nz          = number of mesh points along Z
+!!    @param[in]    Lx          = domain length along X
+!!    @param[in]    Ly          = domain length along Y
+!!    @param[in]    Lz          = domain length along Z
+!!    @param[in]    verbosity   = logical to deactivate verbosity (show messages about group size changes or not)
+!! @details
+!!    Initialise the mesh data associated to the mpi topology and used by the
+!!    particle solver
+!!    @author Jean-Baptiste Lagaert
+subroutine discretisation_create(Nx, Ny, Nz, Lx, Ly, Lz, verbosity)
+
+    ! Input/Output
+    integer, intent(in)             :: Nx, Ny, Nz
+    real(WP), intent(in)            :: Lx, Ly, Lz
+    logical, intent(in), optional   :: verbosity    ! To deactivate verbosity
+
+    ! Others
+    logical                 :: show_message
+
+    ! Init verbosity parameter
+    show_message = .true.
+    if(present(verbosity)) show_message = verbosity
+
+    ! Geometry set from the input arguments.
+    mesh_sc%N(1) = Nx
+    mesh_sc%N(2) = Ny
+    mesh_sc%N(3) = Nz
+
+    mesh_sc%length(1)= Lx
+    mesh_sc%length(2)= Ly
+    mesh_sc%length(3)= Lz
+
+    mesh_sc%N_proc = mesh_sc%N / nb_proc_dim
+    N_proc = mesh_sc%N_proc
+    mesh_sc%relative_extend(:,1) = 1
+    mesh_sc%relative_extend(:,2) = mesh_sc%N_proc
+
+    ! Adjust group size :
+    call set_group_size_init()
+    ! Finish init
+    mesh_init = .false.
+    call discretisation_init(show_message)
+
+end subroutine discretisation_create
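+
+! Illustrative sketch: a 128^3 grid on a unit cube, to be called after
+! cart_create so that nb_proc_dim is already set:
+!   call discretisation_create(128, 128, 128, 1._WP, 1._WP, 1._WP)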
+
+!> Default mesh setup
+!! @author Jean-Baptiste Lagaert
+!!    @param[in]    verbosity   =  logical to deactivate verbosity (show message about group size change or not)
+!! @details
+!!    Initialise the mesh data associated to the mpi topology and used by the
+!!    particle solver to a default grid of default_size (= 80) mesh points in each direction.
+subroutine discretisation_default(verbosity)
+
+    logical, intent(in), optional   :: verbosity    ! To deactivate verbosity
+
+    logical                 :: show_message
+
+    ! Init verbosity parameter
+    show_message = .true.
+    if(present(verbosity)) show_message = verbosity
+
+    ! A cubic geometry: unit length and default_size mesh points in each direction.
+    mesh_sc%N = default_size
+    mesh_sc%length = 1.
+    mesh_sc%N_proc = mesh_sc%N / nb_proc_dim
+    N_proc = mesh_sc%N_proc
+    mesh_sc%relative_extend(:,1) = 1
+    mesh_sc%relative_extend(:,2) = mesh_sc%N_proc
+
+    group_init = .false.
+    call set_group_size_init()
+    mesh_init = .false.
+    call discretisation_init(show_message)
+
+end subroutine discretisation_default
+
+!> To initialize some hidden mesh parameters
+!! @author Jean-Baptiste Lagaert
+!!    @param[in]    verbosity   = optional, logical used to deactivate verbosity
+!! @details
+!!        In order to deal properly with the mpi topology, the data structures and
+!!    the mesh subdivision, some other parameters have to be initialised. Some are
+!!    parameters that cannot be chosen by the user (eg the space step, which depends
+!!    on the domain size and the number of mesh points) and others are "hidden"
+!!    parameters used to avoid communication errors or to allow some optimizations.
+!!    For example, this includes variables used to create unique tags for the different
+!!    mpi communications, to gather lines into groups and to index these groups.
+subroutine discretisation_init(verbosity)
+
+    logical, intent(in), optional   :: verbosity    ! To deactivate verbosity
+
+    integer                 :: direction    ! direction (along X = 1, along Y = 2, along Z = 3)
+    integer                 :: group_dir    ! direction "bis"
+    integer, dimension(3,2) :: N_group      ! number of groups on one process along one direction
+    logical                 :: show_message
+
+    mesh_sc%dx = mesh_sc%length/(mesh_sc%N)
+    show_message = .true.
+    if(present(verbosity)) show_message = verbosity
+
+    ! Compute the number of groups
+    ! Groups of lines along X
+    N_group(1,1) = mesh_sc%N_proc(2)/group_size(1,1)
+    N_group(1,2) = mesh_sc%N_proc(3)/group_size(1,2)
+    ! Groups of lines along Y
+    N_group(2,1) = mesh_sc%N_proc(1)/group_size(2,1)
+    N_group(2,2) = mesh_sc%N_proc(3)/group_size(2,2)
+    ! Groups of lines along Z
+    N_group(3,1) = mesh_sc%N_proc(1)/group_size(3,1)
+    N_group(3,2) = mesh_sc%N_proc(2)/group_size(3,2)
+
+    ! tag_size = number of digits needed to encode a group index (10**tag_size > max ind_group)
+    do direction = 1,3
+        tag_size(direction,:) = 1
+        do group_dir = 1,2
+            do while (N_group(direction, group_dir)/(10**tag_size(direction, group_dir))>1)
+                tag_size(direction, group_dir) = tag_size(direction, group_dir)+1
+            end do
+        end do
+    end do
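+    ! For instance (illustrative values), with N_proc = 20 in each direction
+    ! and group_size = 5, every N_group entry is 4 and 4/(10**1) = 0, so each
+    ! tag_size stays at 1 digit per group index.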
+
+    tag_rank = 1
+    do while(3*max(nb_proc_dim(1),nb_proc_dim(2),nb_proc_dim(3))/(10**tag_rank)>=1)
+        tag_rank = tag_rank+1
+    end do
+    if (tag_rank == 1) tag_rank = 2
+
+    ! Default velocity mesh = same mesh as the scalar one
+    mesh_V = mesh_sc
+
+    ! Print some information about the mesh used
+    if((cart_rank==0).and.(show_message)) then
+        write(*,'(a)') ''
+        if(mesh_init) then
+            write(*,'(6x,a,a24,a)') 'XXXXXX','group size changed ','XXXXXX'
+        else
+            write(*,'(6x,a,a30,a)') '-- ','mesh size',' --'
+            write(*,'(6x,a,3(x,i0))') 'global size =',mesh_sc%N
+            write(*,'(6x,a,3(x,i0))') 'local size =',mesh_sc%N_proc
+        end if
+        write(*,'(6x,a,2(x,i0))') 'group size along X =',group_size(1,:)
+        write(*,'(6x,a,2(x,i0))') 'group size along Y =',group_size(2,:)
+        write(*,'(6x,a,2(x,i0))') 'group size along Z =',group_size(3,:)
+        write(*,'(6x,a)') '-- initialisation: tag generation --'
+        do direction = 1,3
+            write(*,'(6x,a,i0,a,i0,x,i0)') 'tag_size(',direction,',:) = ', tag_size(direction,:)
+        end do
+        write(*,'(6x,a,i0)') 'tag_rank = ', tag_rank
+        write(*,'(6x,a)') '------------------------------------'
+        write(*,'(a)') ''
+    end if
+
+    mesh_init = .true.
+
+end subroutine discretisation_init
+
+!> To change velocity resolution
+!!    @param[in] Nx   = number of points along X
+!!    @param[in] Ny   = number of points along Y
+!!    @param[in] Nz   = number of points along Z
+subroutine discretisation_set_mesh_Velo(Nx, Ny, Nz)
+
+    integer, intent(in) :: Nx, Ny, Nz
+
+    mesh_V%N(1) = Nx
+    mesh_V%N(2) = Ny
+    mesh_V%N(3) = Nz
+
+    mesh_V%N_proc = mesh_V%N / nb_proc_dim
+    mesh_V%relative_extend(:,2) = mesh_V%N_proc
+
+    mesh_V%dx = mesh_V%length/(mesh_V%N)
+
+end subroutine discretisation_set_mesh_Velo
+
+!> Compute a unique tag for an mpi message by concatenating the position (ie line coordinates), proc_gap and a unique Id
+!!    @param[in]    ind_group   = index of the current group of lines
+!!    @param[in]    tag_param   = couple of integers, unique for each message (used to create the tag)
+!!    @param[in]    direction   = current direction
+!!    @param[in]    proc_gap    = number of processes between the sender and the receiver
+!!    @return       tag         = unique tag: each message sent during an iteration has a different tag
+!!@details
+!!     Use this procedure to compute a tag when communicating with a distant process and/or when
+!!    more than two messages will be sent. It produces longer tags than compute_tag_NP because, rather
+!!    than using 0/1, it puts the gap between the sender and the receiver (ie the number of processes
+!!    between them) in the tag. Using these two procedures together yields more unique tags for communication.
+function compute_tag_gap(ind_group, tag_param, direction,proc_gap) result(tag)
+
+    ! Returned variable
+    integer                             :: tag
+    ! Input/Output
+    integer, dimension(2), intent(in)   :: ind_group
+    integer, dimension(2), intent(in)   :: tag_param
+    integer, intent(in)                 :: direction
+    integer, intent(in)                 :: proc_gap
+    ! Other local variables
+    integer                              :: abs_proc_gap ! absolute value of proc_gap
+
+    abs_proc_gap = max(abs(proc_gap),1)
+    tag = (tag_param(1)*10+direction)*(10**(tag_rank+1))
+    if (proc_gap>=0) then
+        tag = tag + proc_gap*10
+    else
+        tag = tag - proc_gap*10 +1
+    end if
+    tag = (tag*(10**tag_size(direction,1)))+(ind_group(1)-1)
+    tag = ((tag*(10**tag_size(direction,2)))+(ind_group(2)-1))
+    tag = (tag*10)+tag_param(2)
+
+    ! The tag cannot be too big (it must be a valid integer, smaller than
+    ! the maximum mpi tag)
+    if ((tag<0).or.(tag>MPI_TAG_UB))  then
+        !print*, 'tag too big - regenerated'
+        tag = (tag_param(1))*(10**(tag_rank+1))
+        if (proc_gap>=0) then
+            tag = tag + proc_gap*10
+        else
+            tag = tag - proc_gap*10 +1
+        end if
+        tag = tag*(10**tag_size(direction,1))+(ind_group(1)-1)
+        tag = ((tag*(10**tag_size(direction,2)))+(ind_group(2)-1))
+        tag = (tag*10)+tag_param(2)
+        if ((tag<0).or.(tag>MPI_TAG_UB))  then
+            !print*, 'tag very too big - regenerated'
+            tag = (tag_param(1))*(10**(tag_rank+1))
+            if (proc_gap>=0) then
+                tag = tag + proc_gap*10
+            else
+                tag = tag - proc_gap*10 +1
+            end if
+            tag = (tag*10)+tag_param(2)
+            if ((tag<0).or.(tag>MPI_TAG_UB))  then
+                tag = tag_param(1)*10 + tag_param(2)
+                if (proc_gap<0) tag = tag +100
+                !print*, 'rank = ', cart_rank, ' coord = ', coord
+                !print*, 'ind_group = ', ind_group, ' ; tag_param = ', tag_param
+                !print*, 'direction = ', direction, ' gap = ', proc_gap ,' and tag = ', tag
+            end if
+        end if
+    end if
+! XXX End of debugging helpers XXX
+
+end function compute_tag_gap
+
+
+!> Compute a unique tag for an mpi message by concatenating the position (ie line coordinates), +1 or -1 and a unique Id
+!!    @param[in]    ind_group   = index of the current group of lines
+!!    @param[in]    tag_param   = couple of integers, unique for each message (used to create the tag)
+!!    @param[in]    direction   = current direction
+!!    @return       tag_table   = unique couple of tags: use tag_table(1) for the message to the previous proc.
+!!                                (or the first message) and tag_table(2) for the other message.
+!!@details
+!!     Use this procedure to compute tags for communication with your neighbors or when only two messages are sent:
+!!    it produces smaller tags than compute_tag_gap because the gap between sender and receiver is replaced by 1,
+!!    for communication with the previous process (or the first of the two messages), or 0, for communication with
+!!    the next process (or the second message). This allows some unique Ids to be reused.
+function compute_tag_NP(ind_group, tag_param, direction) result (tag_table)
+
+    ! Returned variable
+    integer, dimension(2)               :: tag_table
+    ! Input/Output
+    integer, dimension(2), intent(in)   :: ind_group
+    integer, dimension(2), intent(in)   :: tag_param
+    integer, intent(in)                 :: direction
+
+    tag_table(2) = (tag_param(1)*10+direction)*10
+    tag_table(1) = tag_table(2)
+
+    tag_table(2) = tag_table(2) +1
+
+    tag_table(2) = (tag_table(2)*(10**tag_size(direction,1)))+(ind_group(1)-1)
+    tag_table(1) = (tag_table(1)*(10**tag_size(direction,1)))+(ind_group(1)-1)
+
+    tag_table(2) = ((tag_table(2)*(10**tag_size(direction,2)))+(ind_group(2)-1))
+    tag_table(1) = ((tag_table(1)*(10**tag_size(direction,2)))+(ind_group(2)-1))
+
+    tag_table(2) = (tag_table(2)*10)+tag_param(2)
+    tag_table(1) = (tag_table(1)*10)+tag_param(2)
+
+    ! Check if tag limitations are respected.
+    if ((minval(tag_table)<0).or.(maxval(tag_table)>MPI_TAG_UB))  then
+        tag_table = tag_param(1)*100 + tag_param(2)*10
+        tag_table = tag_table + (/1,2/)
+        !print*, 'rank = ', cart_rank, ' coord = ', coord
+        !print*, 'ind_group = ', ind_group, ' ; tag_param = ', tag_param
+        !print*, 'direction = ', direction, ' and tag = ', tag_table
+    end if
+
+
+end function compute_tag_NP
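+
+! Worked example (illustrative only; assumes tag_size(1,:) = (/2,2/)):
+!   compute_tag_NP((/1,1/), (/1,2/), 1) builds
+!     tag_table(1): (1*10+1)*10 + 0 = 110 -> 11000 -> 1100000 -> 11000002
+!     tag_table(2): (1*10+1)*10 + 1 = 111 -> 11100 -> 1110000 -> 11100002
+!   so the two tags differ only in the 0/1 digit inserted after the direction.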
+
+
+!> Adjust the private variable "group_size": lines are gathered in groups of the
+!! same size, independent of the direction
+!!    @param[in]    s           =  integer such that each group will gather s x s lines
+!!    @param[in]    init        =  logical telling whether this is a default initialisation of group_size
+!!    @param[in]    verbosity   =  logical to deactivate verbosity (show messages about group size changes or not)
+!! @details
+!!    Create groups of s x s lines along the three directions.
+subroutine set_group_size_1(s, init, verbosity)
+
+    integer, intent(in)             :: s
+    logical, intent(in), optional   :: init
+    logical, intent(in), optional   :: verbosity
+
+    if (.not.mesh_init) then
+        group_size = s
+        ! And now group size is initialized !
+        group_init = .true.
+    else
+        if (all(mod(mesh_sc%N_proc,s)==0)) group_size = s
+        if (present(verbosity)) then
+            call discretisation_init(verbosity=verbosity)
+        else
+            call discretisation_init()
+        end if
+    end if
+
+    if (present(init)) call set_group_size(init)
+
+end subroutine set_group_size_1
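+
+! Usage sketch (illustrative; assumes set_group_size is the generic interface
+! gathering set_group_size_1/_1x2/_3/_init, as the calls above suggest):
+!   call set_group_size(5)   ! groups of 5x5 lines in every direction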
+
+
+!> Adjust the private variable "group_size": lines are gathered in groups of the
+!! same size, independent of the direction
+!!    @param[in]    s1      =  integer such that each group will gather s1 lines along the first remaining direction
+!!    @param[in]    s2      =  integer such that each group will gather s2 lines along the second remaining direction
+!!    @param[in]    init    =  logical telling whether this is a default initialisation of group_size
+!!    @param[in]    verbo   =  logical to deactivate verbosity (show messages about group size changes or not)
+!! @details
+!!    Created groups will gather s1 x s2 lines.
+subroutine set_group_size_1x2(s1, s2, init, verbo)
+
+    integer, intent(in)             :: s1, s2
+    logical, intent(in), optional   :: init
+    logical, intent(in), optional   :: verbo
+
+    if (.not. mesh_init) then
+        group_size(:,1) = s1
+        group_size(:,2) = s2
+        ! And now group size is initialized !
+        group_init = .true.
+    else
+        if (all(mod(mesh_sc%N_proc,s1)==0)) group_size(:,1) = s1
+        if (all(mod(mesh_sc%N_proc,s2)==0)) group_size(:,2) = s2
+        if (present(verbo)) then
+            call discretisation_init(verbosity=verbo)
+        else
+            call discretisation_init()
+        end if
+    end if
+
+    if (present(init)) call set_group_size(init)
+
+end subroutine set_group_size_1x2
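+
+! Usage sketch (illustrative, under the same assumption on the generic interface):
+!   call set_group_size(4, 2, verbo=.false.)   ! groups of 4x2 lines, without messages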
+
+
+!> Adjust the private variable "group_size": lines are gathered in groups of a
+!! size depending on the current direction.
+!!    @param[in]    sX =  integer such that each group of lines along X will gather sX x sX lines
+!!    @param[in]    sY =  integer such that each group of lines along Y will gather sY x sY lines
+!!    @param[in]    sZ =  integer such that each group of lines along Z will gather sZ x sZ lines
+!!    @param[in]    init    =  logical telling whether this is a default initialisation of group_size
+!!    @param[in]    verbo   =  logical to deactivate verbosity (show messages about group size changes or not)
+subroutine set_group_size_3(sX, sY, sZ, init, verbo)
+
+    integer, intent(in)             :: sX, sY, sZ
+    logical, intent(in), optional   :: init
+    logical, intent(in), optional   :: verbo
+
+    if (.not.mesh_init) then
+        group_size(1,:) = (/sY, sZ/)
+        group_size(2,:) = (/sX, sZ/)
+        group_size(3,:) = (/sX, sY/)
+        ! And now group size is initialized !
+        group_init = .true.
+    else
+        if (all(mod(mesh_sc%N_proc(2:3),sX)==0)) group_size(1,:) = sX
+        if ((mod(mesh_sc%N_proc(1),sY)==0).and.(mod(mesh_sc%N_proc(3),sY)==0)) group_size(2,:) = sY
+        if ((mod(mesh_sc%N_proc(1),sZ)==0).and.(mod(mesh_sc%N_proc(2),sZ)==0)) group_size(3,:) = sZ
+        if (present(verbo)) then
+            call discretisation_init(verbosity=verbo)
+        else
+            call discretisation_init()
+        end if
+    end if
+
+    if (present(init)) call set_group_size(init)
+
+
+end subroutine set_group_size_3
+
+!> Adjust the private variable "group_size": lines are gathered in groups of the
+!! same size, independent of the direction
+!!    @param[in]    init    =  logical telling whether this is a default initialisation of group_size
+!! @details
+!!    Create groups of an acceptable default size (or re-initialise the group size if the
+!! optional argument "init" is present and set to true).
+subroutine set_group_size_init(init)
+
+    logical, intent(in), optional   :: init
+
+    ! To check if group size is well defined
+    integer, dimension(3,2)         :: domain_size
+
+    if (present(init)) group_init = init
+
+    if (.not.group_init) then
+        ! Setup the size of line group to a default value
+        if (all(mod(mesh_sc%N_proc,8)==0)) then
+            group_size = 8
+        else if (all(mod(mesh_sc%N_proc,5)==0)) then
+            group_size = 5
+        else if (all(mod(mesh_sc%N_proc,4)==0)) then
+            group_size = 4
+        else if (all(mod(mesh_sc%N_proc,2)==0)) then
+            group_size = 2
+        else
+            group_size = 1
+        end if
+        ! And now group size is initialized !
+        group_init = .true.
+    else
+        domain_size(1,:) = (/mesh_sc%N_proc(2), mesh_sc%N_proc(3)/)
+        domain_size(2,:) = (/mesh_sc%N_proc(1), mesh_sc%N_proc(3)/)
+        domain_size(3,:) = (/mesh_sc%N_proc(1), mesh_sc%N_proc(2)/)
+
+        where (mod(domain_size,group_size)/=0)
+            where(mod(domain_size,8)==0)
+                group_size=8
+            elsewhere(mod(domain_size,5)==0)
+                group_size=5
+            elsewhere(mod(domain_size,4)==0)
+                group_size=4
+            elsewhere(mod(domain_size,2)==0)
+                group_size=2
+            elsewhere
+                group_size=1
+            end where
+        end where
+    end if
+
+end subroutine set_group_size_init
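+
+! Example of the default choice (illustrative numbers): with mesh_sc%N_proc = (/40,40,40/)
+! every component is divisible by 8, so group_size is set to 8; with N_proc = (/30,30,30/)
+! the first matching divisor is 5 (since 30 mod 8 /= 0 but 30 mod 5 == 0).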
+
+
+!> Save data about the cartesian mesh created in the cart_topology module
+!>    @param[out]   mesh    = variable of type cartesian_mesh where the mesh data are saved
+subroutine mesh_save_default(mesh)
+
+    ! Input/Output
+    type(cartesian_mesh), intent(out)       :: mesh
+    ! Other local variables
+    !integer                                 :: direction    ! integer matching to a direction (X, Y or Z)
+
+    mesh = mesh_sc
+
+end subroutine mesh_save_default
+
+
+
+end module cart_topology
+!> @}
diff --git a/HySoP/src/scalesReduced/particles/advec.f90 b/HySoP/src/scalesReduced/particles/advec.f90
new file mode 100644
index 0000000000000000000000000000000000000000..a34d30614e394afe8aac3400b5ecb22af2f61ac9
--- /dev/null
+++ b/HySoP/src/scalesReduced/particles/advec.f90
@@ -0,0 +1,531 @@
+!USEFORTEST advec
+!> @addtogroup part
+!! @{
+!------------------------------------------------------------------------------
+!
+! MODULE: advec
+!
+!
+! DESCRIPTION:
+!> The module advec provides all public interfaces to solve an advection equation
+!! with a particle method.
+!
+!> @details
+!!     This module contains the generic procedures to initialize and parametrise the
+!! advection solver based on the particle method. It also contains the subroutine
+!! "advec_step", which solves the equation for a given time step. It is the only
+!! module which is supposed to be included by a code using this library of
+!! particle methods.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+module advec
+
+    use precision_tools
+    use advec_abstract_proc
+    use Interpolation_velo
+    implicit none
+
+    ! ===== Private variables =====
+    !> numerical method used to advect the scalar
+    character(len=str_short), private   :: type_part_solv
+    !> dimensional splitting (e.g. classical, Strang or particle)
+    character(len=str_short), private   :: dim_splitting
+    !> Group size along current direction
+    integer, protected, dimension(2)  :: gsX, gsY, gsZ
+    !> Indices of the transverse directions
+    integer, protected                :: gp_dir1, gp_dir2
+    !> Index of the current direction
+    integer, protected                :: line_dir
+
+
+    ! ===== Public procedures =====
+    ! Scheme used to advect the scalar (order 2 or 4?)
+    public                              :: type_part_solver
+
+    ! Advection methods
+    public                                          :: advec_init           ! initialize the scalar solver
+    procedure(advec_step_Torder2), pointer, public  :: advec_step           => null()
+    public                                          :: advec_step_Torder1   ! advect the scalar field during a time step.
+    public                                          :: advec_step_Torder2   ! advect the scalar field during a time step.
+
+!TODO make these pointers protected
+!    ! Remeshing formula
+!    procedure(AC_remesh), pointer, protected        :: advec_remesh         => null()
+!    ! Particle velocity initialisation
+!    procedure(AC_init_p_V), pointer, protected      :: advec_init_velo     => null()
+    ! Remeshing formula
+    procedure(AC_remesh), pointer, public           :: advec_remesh         => null()
+    ! Particle velocity initialisation
+    procedure(AC_init_p_V), pointer, public         :: advec_init_velo     => null()
+
+contains
+
+! ===== Public methods =====
+
+!> Return the name of the particle method used for the advection
+!!    @return type_part_solver      = numerical method used for advection
+function type_part_solver()
+    character(len=str_short)    :: type_part_solver
+
+    type_part_solver = type_part_solv
+end function
+
+!> Initialise the particle advection methods
+!!    @param[in]    order       = to choose the remeshing method (and thus the order)
+!!    @param[out]   stab_coeff  = stability coefficient (condition stability is
+!!                                  dt< stab_coeff/norm_inf(V))
+!!    @param[in]    dim_split   = dimensional splitting (e.g. classical,
+!!                                    Strang splitting or particle splitting)
+!!    @param[in]    verbosity   = to display info about chosen remeshing formula (optional)
+subroutine advec_init(order, stab_coeff, verbosity, dim_split)
+
+    use advec_variables                     ! contains info about solver parameters and others.
+    use cart_topology                       ! Description of mesh and of mpi topology
+    use advecX, only: advecX_remesh_init    ! solver for advection along X
+    use advec_common                        ! some procedures common to advection along all directions
+
+    ! Input/Output
+    character(len=*), optional, intent(in)  ::  order, dim_split
+    logical, optional, intent(in)           ::  verbosity
+    real(WP), optional, intent(out)         ::  stab_coeff
+
+    ! Use default solver if it is not chosen by the user.
+    if(present(order)) then
+        type_part_solv = order
+    else
+        type_part_solv = 'p_O2'
+    end if
+
+    ! Initialize the solver
+    if (present(verbosity)) then
+        call AC_solver_init(type_part_solv, verbosity)
+    else
+        call AC_solver_init(type_part_solv)
+    end if
+
+    ! ===== Choose the dimensional splitting to use =====
+    ! XXX parse the input file
+    ! Default dimensional splitting if the user does not choose it
+    if(present(dim_split)) then
+        dim_splitting = dim_split
+    else
+        dim_splitting = 'strang'
+    end if
+
+    select case(dim_splitting)
+        case('classic')
+            advec_step => advec_step_Torder1
+            ! Compute stability coefficient
+            if (present(stab_coeff)) stab_coeff = 1.0/(2.0*real(bl_size, WP))
+        case default    ! Strang
+            advec_step => advec_step_Torder2
+            ! Compute stability coefficient - as each dimension is solved with
+            ! dt/2, stab_coeff is twice as large
+            if (present(stab_coeff)) stab_coeff = 1.0/(real(bl_size, WP))
+    end select
+
+    ! Call the right remeshing formula
+    select case(type_part_solv)
+        case('p_O2')
+            advec_remesh => AC_remesh_lambda_group ! or Xremesh_O2
+        case('p_O4')
+            advec_remesh => AC_remesh_lambda_group ! or Xremesh_O4
+        case('p_L2')
+            advec_remesh => AC_remesh_limit_lambda_group    ! limited and corrected lambda 2
+        case('p_M4')
+            advec_remesh => AC_remesh_Mprime_group ! Xremesh_Mprime4
+        ! Check if the interface is done. OK in Scales, but it needs to
+        ! get diffusion information. OK for the advec_plus variant, but here?
+        !case('d_M4')
+        !    advec_remesh_plus => AC_remesh_Mprime_group ! Xremesh_Mprime4 with diffusion
+        case('p_M6')
+            advec_remesh => AC_remesh_Mprime_group ! Xremesh_Mprime6
+        case('p_M8')
+            advec_remesh => AC_remesh_Mprime_group ! Xremesh_Mprime8
+        case('p_44')
+            advec_remesh => AC_remesh_Mprime_group ! Lambda 4,4
+        case('p_64')
+            advec_remesh => AC_remesh_Mprime_group ! Lambda 6,4
+        case('p_66')
+            advec_remesh => AC_remesh_Mprime_group ! Lambda 6,6
+        case('p_84')
+            advec_remesh => AC_remesh_Mprime_group ! Lambda 8,4
+        ! To ensure backward compatibility
+        case('p_L4')
+            advec_remesh => AC_remesh_Mprime_group ! Lambda 4,4
+        case('p_L6')
+            advec_remesh => AC_remesh_Mprime_group ! Lambda 6,6
+        ! Default value
+        case default
+            advec_remesh => AC_remesh_lambda_group ! or Xremesh_O2
+    end select
+
+    call AC_setup_init()
+    call advecX_remesh_init()
+
+    ! Save group size
+    gsX =group_size(1,:)
+    gsY =group_size(2,:)
+    gsZ =group_size(3,:)
+
+
+end subroutine advec_init
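+
+! Usage sketch (illustrative; 'stab' and the velocity field V are caller-side variables):
+!   real(WP) :: stab
+!   call advec_init(order='p_O2', stab_coeff=stab, verbosity=.true., dim_split='strang')
+!   ! the stability condition then reads dt < stab / norm_inf(V)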
+
+
+!> Adjust 1D solver to advect scalar field along X
+subroutine advec_setup_alongX()
+    use advecX, only : advecX_init_group
+    use advec_common    ! Some procedures common to advection along all directions
+    call AC_remesh_setup_alongX()
+    advec_init_velo => advecX_init_group
+    gp_dir1 = 2
+    gp_dir2 = 3
+    line_dir = 1
+end subroutine advec_setup_alongX
+
+!> Adjust 1D solver to advect scalar field along Y
+subroutine advec_setup_alongY()
+    use advecY, only : advecY_init_group
+    use advec_common    ! Some procedures common to advection along all directions
+    call AC_remesh_setup_alongY()
+    advec_init_velo => advecY_init_group
+    line_dir = 2
+    gp_dir1 = 1
+    gp_dir2 = 3
+end subroutine advec_setup_alongY
+
+!> Adjust 1D solver to advect scalar field along Z
+subroutine advec_setup_alongZ()
+    use advecZ, only : advecZ_init_group
+    use advec_common    ! Some procedures common to advection along all directions
+    call AC_remesh_setup_alongZ()
+    advec_init_velo => advecZ_init_group
+    gp_dir1 = 1
+    gp_dir2 = 2
+    line_dir = 3
+end subroutine advec_setup_alongZ
+
+!> Solve advection equation - order 2 - with basic velocity interpolation
+!!    @param[in]        dt          = time step
+!!    @param[in]        Vx          = velocity along x (could be discretised on a bigger mesh than the scalar)
+!!    @param[in]        Vy          = velocity along y
+!!    @param[in]        Vz          = velocity along z
+!!    @param[in,out]    scal        = scalar field to advect
+subroutine advec_step_Inter_basic(dt, Vx, Vy, Vz, scal)
+
+    ! Input/Output
+    real(WP), intent(in)                        :: dt
+    real(WP), dimension(:,:,:), intent(in)      :: Vx, Vy, Vz
+    real(WP), dimension(:,:,:), intent(inout)   :: scal
+    ! Local
+    real(WP), dimension(:,:,:), allocatable   :: Vx_f, Vy_f, Vz_f
+    integer                                   :: ierr                ! Error code.
+
+    allocate(Vx_f(mesh_sc%N_proc(1),mesh_sc%N_proc(2),mesh_sc%N_proc(3)),stat=ierr)
+    if (ierr/=0) write(6,'(a,i0,a)') '[ERROR] on cart_rank ', cart_rank, ' - not enough memory for Vx_f'
+    allocate(Vy_f(mesh_sc%N_proc(1),mesh_sc%N_proc(2),mesh_sc%N_proc(3)),stat=ierr)
+    if (ierr/=0) write(6,'(a,i0,a)') '[ERROR] on cart_rank ', cart_rank, ' - not enough memory for Vy_f'
+    allocate(Vz_f(mesh_sc%N_proc(1),mesh_sc%N_proc(2),mesh_sc%N_proc(3)),stat=ierr)
+    if (ierr/=0) write(6,'(a,i0,a)') '[ERROR] on cart_rank ', cart_rank, ' - not enough memory for Vz_f'
+
+    call Interpol_3D(Vx, mesh_V%dx, Vx_f, mesh_sc%dx)
+    call Interpol_3D(Vy, mesh_V%dx, Vy_f, mesh_sc%dx)
+    call Interpol_3D(Vz, mesh_V%dx, Vz_f, mesh_sc%dx)
+    if (cart_rank==0) write(6,'(a)') '        [INFO PARTICLES] Interpolation done'
+
+    call advec_step_Torder2(dt, Vx_f, Vy_f, Vz_f, scal)
+
+    deallocate(Vx_f)
+    deallocate(Vy_f)
+    deallocate(Vz_f)
+
+end subroutine advec_step_Inter_basic
+
+
+!> Solve advection equation - order 2 - with more complex velocity interpolation
+!!    @param[in]        dt          = time step
+!!    @param[in]        Vx          = velocity along x (could be discretised on a bigger mesh than the scalar)
+!!    @param[in]        Vy          = velocity along y
+!!    @param[in]        Vz          = velocity along z
+!!    @param[in,out]    scal        = scalar field to advect
+subroutine advec_step_Inter_Two(dt, Vx, Vy, Vz, scal)
+
+    ! Input/Output
+    real(WP), intent(in)                        :: dt
+    real(WP), dimension(:,:,:), intent(in)      :: Vx, Vy, Vz
+    real(WP), dimension(:,:,:), intent(inout)   :: scal
+    ! Local
+    real(WP), dimension(:,:,:), allocatable   :: Vx_c, Vy_c, Vz_c
+    real(WP), dimension(:,:,:), allocatable   :: Vx_f, Vy_f, Vz_f
+    integer                                   :: ierr                ! Error code.
+
+    allocate(Vx_c(mesh_V%N_proc(1),mesh_sc%N_proc(2),mesh_sc%N_proc(3)),stat=ierr)
+    if (ierr/=0) write(6,'(a,i0,a)') '[ERROR] on cart_rank ', cart_rank, ' - not enough memory for Vx_c'
+    allocate(Vx_f(mesh_sc%N_proc(1),mesh_sc%N_proc(2),mesh_sc%N_proc(3)),stat=ierr)
+    if (ierr/=0) write(6,'(a,i0,a)') '[ERROR] on cart_rank ', cart_rank, ' - not enough memory for Vx_f'
+    allocate(Vy_c(mesh_V%N_proc(2),mesh_sc%N_proc(1),mesh_sc%N_proc(3)),stat=ierr)
+    if (ierr/=0) write(6,'(a,i0,a)') '[ERROR] on cart_rank ', cart_rank, ' - not enough memory for Vy_c'
+    allocate(Vy_f(mesh_sc%N_proc(2),mesh_sc%N_proc(1),mesh_sc%N_proc(3)),stat=ierr)
+    if (ierr/=0) write(6,'(a,i0,a)') '[ERROR] on cart_rank ', cart_rank, ' - not enough memory for Vy_f'
+    allocate(Vz_c(mesh_V%N_proc(3),mesh_sc%N_proc(1),mesh_sc%N_proc(2)),stat=ierr)
+    if (ierr/=0) write(6,'(a,i0,a)') '[ERROR] on cart_rank ', cart_rank, ' - not enough memory for Vz_c'
+    allocate(Vz_f(mesh_sc%N_proc(3),mesh_sc%N_proc(1),mesh_sc%N_proc(2)),stat=ierr)
+    if (ierr/=0) write(6,'(a,i0,a)') '[ERROR] on cart_rank ', cart_rank, ' - not enough memory for Vz_f'
+
+    call Interpol_2D_3D_vect(mesh_sc%dx, mesh_V%dx, Vx, Vy, Vz, Vx_c, Vx_f, Vy_c, Vy_f, Vz_c, Vz_f)
+
+    call advec_setup_alongX()
+    call advec_X_basic_no_com(dt/2.0, gsX, Vx_f, scal)
+    call advec_setup_alongY()
+    call advec_1D_Vcoarse(dt/2.0, gsY, Vy_c, Vy_f, scal)
+    call advec_setup_alongZ()
+    call advec_1D_Vcoarse(dt/2.0, gsZ, Vz_c, Vz_f, scal)
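+    ! Second half of the symmetric Strang sequence (X,Y,Z | Z,Y,X): the sweeps are
+    ! mirrored, so Z is advected twice with dt/2, to reach second order in time.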
+    call advec_1D_Vcoarse(dt/2.0, gsZ, Vz_c, Vz_f, scal)
+    call advec_setup_alongY()
+    call advec_1D_Vcoarse(dt/2.0, gsY, Vy_c, Vy_f, scal)
+    call advec_setup_alongX()
+    call advec_X_basic_no_com(dt/2.0, gsX, Vx_f, scal)
+
+    deallocate(Vx_f)
+    deallocate(Vy_f)
+    deallocate(Vz_f)
+
+    deallocate(Vx_c)
+    deallocate(Vy_c)
+    deallocate(Vz_c)
+
+end subroutine advec_step_Inter_Two
+
+!> Solve advection equation - order 1 in time (order 2 dimensional splitting)
+!!    @param[in]        dt          = time step
+!!    @param[in]        Vx          = velocity along x (could be discretised on a bigger mesh than the scalar)
+!!    @param[in]        Vy          = velocity along y
+!!    @param[in]        Vz          = velocity along z
+!!    @param[in,out]    scal        = scalar field to advect
+subroutine advec_step_Torder1(dt, Vx, Vy, Vz, scal)
+
+    ! Input/Output
+    real(WP), intent(in)                        :: dt
+    real(WP), dimension(:,:,:), intent(in)      :: Vx, Vy, Vz
+    real(WP), dimension(:,:,:), intent(inout)   :: scal
+
+    call advec_setup_alongX()
+    call advec_X_basic_no_com(dt, gsX, Vx, scal)
+    call advec_setup_alongY()
+    call advec_1D_basic(dt, gsY, Vy, scal)
+    call advec_setup_alongZ()
+    call advec_1D_basic(dt, gsZ, Vz, scal)
+
+end subroutine advec_step_Torder1
+
+
+!> Solve advection equation - order 2 in time (order 2 dimensional splitting)
+!!    @param[in]        dt          = time step
+!!    @param[in]        Vx          = velocity along x (could be discretised on a bigger mesh than the scalar)
+!!    @param[in]        Vy          = velocity along y
+!!    @param[in]        Vz          = velocity along z
+!!    @param[in,out]    scal        = scalar field to advect
+subroutine advec_step_Torder2(dt, Vx, Vy, Vz, scal)
+
+    ! Input/Output
+    real(WP), intent(in)                        :: dt
+    real(WP), dimension(:,:,:), intent(in)      :: Vx, Vy, Vz
+    real(WP), dimension(:,:,:), intent(inout)   :: scal
+
+    call advec_setup_alongX()
+    call advec_X_basic_no_com(dt/2.0, gsX, Vx, scal)
+    call advec_setup_alongY()
+    call advec_1D_basic(dt/2.0, gsY, Vy, scal)
+    call advec_setup_alongZ()
+    call advec_1D_basic(dt/2.0, gsZ, Vz, scal)
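+    ! Second half of the symmetric Strang sequence (X,Y,Z | Z,Y,X): the sweeps are
+    ! mirrored, so Z is advected twice with dt/2, to reach second order in time.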
+    call advec_1D_basic(dt/2.0, gsZ, Vz, scal)
+    call advec_setup_alongY()
+    call advec_1D_basic(dt/2.0, gsY, Vy, scal)
+    call advec_setup_alongX()
+    call advec_X_basic_no_com(dt/2.0, gsX, Vx, scal)
+
+end subroutine advec_step_Torder2
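+
+! Typical caller-side time loop (illustrative sketch; advec_step has been associated
+! by advec_init and points to advec_step_Torder2 by default):
+!   do ite = 1, n_iter
+!       call advec_step(dt, Vx, Vy, Vz, scal)
+!   end do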
+
+
+!> Scalar advection along one direction - variant for cases with no communication
+!!    @param[in]        dt          = time step
+!!    @param[in]        V_comp      = velocity along X (could be discretised on a bigger mesh than the scalar)
+!!    @param[in,out]    scal3D      = scalar field to advect
+!! @details
+!!   Works only for direction = X. A basic (and very simple) remeshing just has to
+!! be added for the other directions.
+subroutine advec_X_basic_no_com(dt, gs, V_comp, scal3D)
+
+    use advecX          ! Procedure specific to advection along X
+    use advec_common    ! Some procedures common to advection along all directions
+    use advec_variables ! contains info about solver parameters and others.
+    use cart_topology   ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    real(WP), intent(in)                          :: dt
+    integer, dimension(2), intent(in)             :: gs
+    real(WP), dimension(:,:,:), intent(in)        :: V_comp
+    real(WP), dimension(:,:,:), intent(inout)     :: scal3D
+    ! Other local variables
+    integer                                             :: j,k          ! index of the current mesh point
+    integer, dimension(2)                               :: ind_group    ! index of the current group of lines (=(i,k) by default)
+    real(WP),dimension(mesh_sc%N_proc(line_dir),gs(1),gs(2))  :: p_pos_adim   ! adimensionned particles position
+    real(WP),dimension(mesh_sc%N_proc(line_dir)+1,gs(1),gs(2)):: p_V          ! particles velocity
+
+    ind_group = 0
+
+! Works only for the X direction - add no_com remeshing along Y and Z to use it for
+! advection along these directions.
+    line_dir = 1
+    gp_dir1 = 2
+    gp_dir2 = 3
+
+    do k = 1, mesh_sc%N_proc(gp_dir2), gs(2)
+        ind_group(2) = ind_group(2) + 1
+        ind_group(1) = 0
+        do j = 1, mesh_sc%N_proc(gp_dir1), gs(1)
+            ind_group(1) = ind_group(1) + 1
+
+            ! ===== Init particles =====
+            ! p_pos is used to store velocity at grid point
+            call advec_init_velo(V_comp, j, k, gs, p_pos_adim)
+            ! p_V = position at the middle point for the RK2 scheme
+            call AC_get_p_pos_adim(p_V, p_pos_adim, 0.5_WP*dt, mesh_sc%dx(line_dir), mesh_sc%N_proc(line_dir))
+
+            ! ===== Advection =====
+            ! -- Compute velocity (with a RK2 scheme): p_V = velocity at middle point position --
+            ! Note that p_pos is used as velocity component storage
+            call AC_interpol_lin_no_com(line_dir, gs, p_pos_adim, p_V)
+            ! p_v = velocity at middle point position
+            ! -- Push particles --
+            call AC_get_p_pos_adim(p_pos_adim, p_V, dt, mesh_sc%dx(line_dir), mesh_sc%N_proc(line_dir))
+            ! Now p_pos = particle position and p_V = particle velocity
+
+            ! ===== Remeshing =====
+            call advecX_remesh_no_com(ind_group, gs, p_pos_adim, p_V, j, k, scal3D, dt)
+
+        end do
+    end do
+
+end subroutine advec_X_basic_no_com
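+
+! The two AC_get_p_pos_adim calls above implement the RK2 (midpoint) scheme:
+!   x_mid = x + (dt/2)*V(x)    (stored in p_V, then overwritten by V(x_mid))
+!   x_new = x + dt*V(x_mid)    (stored in p_pos_adim)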
+
+
+!> Scalar advection along one direction (this procedure calls the right solver, depending on the simulation setup)
+!!    @param[in]        dt          = time step
+!!    @param[in]        gs          = size of the work item along the transverse directions
+!!    @param[in]        V_comp      = velocity component
+!!    @param[in,out]    scal3D      = scalar field to advect
+subroutine advec_1D_basic(dt, gs, V_comp, scal3D)
+
+    use advecX, only : advecX_init_group    ! procedure devoted to advection along X
+    use advecY, only : advecY_init_group    ! procedure devoted to advection along Y
+    use advecZ, only : advecZ_init_group    ! procedure devoted to advection along Z
+    use advec_variables ! contains info about solver parameters and others.
+    use cart_topology   ! Description of mesh and of mpi topology
+    use advec_common    ! some procedures common to advection along all directions
+
+    ! Input/Output
+    real(WP), intent(in)                          :: dt
+    integer, dimension(2), intent(in)             :: gs
+    real(WP), dimension(:,:,:), intent(in)        :: V_comp
+    real(WP), dimension(:,:,:), intent(inout)     :: scal3D
+    ! Other local variables
+    integer                                       :: i,j          ! index of the current mesh point
+    integer, dimension(2)                         :: ind_group    ! index of the current group of lines (=(i,k) by default)
+    real(WP), dimension(mesh_sc%N_proc(line_dir),gs(1),gs(2))  :: p_pos_adim ! adimensionned particles position
+    real(WP), dimension(mesh_sc%N_proc(line_dir)+1,gs(1),gs(2)):: p_V        ! particles velocity
+
+    ind_group = 0
+
+    do j = 1, mesh_sc%N_proc(gp_dir2), gs(2)
+        ind_group(2) = ind_group(2) + 1
+        ind_group(1) = 0
+        do i = 1, mesh_sc%N_proc(gp_dir1), gs(1)
+            ind_group(1) = ind_group(1) + 1
+
+            ! ===== Init particles =====
+            call advec_init_velo(V_comp, i, j, gs, p_pos_adim)
+            ! p_pos is used to store velocity at grid point
+            call AC_get_p_pos_adim(p_V, p_pos_adim, 0.5_WP*dt, mesh_sc%dx(line_dir), mesh_sc%N_proc(line_dir))
+            ! p_V = position at the middle point for the RK2 scheme
+
+            ! ===== Advection =====
+            ! -- Compute velocity (with a RK2 scheme) --
+            ! Note that p_pos is used as velocity component storage
+            call AC_interpol_lin(line_dir, gs, ind_group, p_pos_adim, p_V)
+            ! p_v = velocity at middle point position
+            ! -- Push particles --
+            call AC_get_p_pos_adim(p_pos_adim, p_V, dt, mesh_sc%dx(line_dir), mesh_sc%N_proc(line_dir))
+            ! Now p_pos = particle position and p_V = particle velocity
+
+            ! ===== Remeshing =====
+            call advec_remesh(line_dir, ind_group, gs, p_pos_adim, p_V, i,j,scal3D, dt)
+
+        end do
+    end do
+
+end subroutine advec_1D_basic
+
+
+!> Scalar advection along one direction (this procedure calls the right solver, depending on the simulation setup)
+!!    @param[in]        dt          = time step
+!!    @param[in]        gs          = size of the work item along the transverse directions
+!!    @param[in]        V_coarse    = velocity component on the coarse (velocity) grid
+!!    @param[in]        V_fine      = velocity component interpolated on the fine (scalar) grid
+!!    @param[in,out]    scal3D      = scalar field to advect
+subroutine advec_1D_Vcoarse(dt, gs, V_coarse, V_fine, scal3D)
+
+    use advecX, only : advecX_init_group    ! procedure devoted to advection along X
+    use advecY, only : advecY_init_group    ! procedure devoted to advection along Y
+    use advecZ, only : advecZ_init_group    ! procedure devoted to advection along Z
+    use advec_variables ! contains info about solver parameters and others.
+    use cart_topology   ! Description of mesh and of mpi topology
+    use advec_common    ! some procedures common to advection along all directions
+
+    ! Input/Output
+    real(WP), intent(in)                          :: dt
+    integer, dimension(2), intent(in)             :: gs
+    real(WP), dimension(:,:,:), intent(in)        :: V_coarse
+    real(WP), dimension(:,:,:), intent(in)        :: V_fine
+    real(WP), dimension(:,:,:), intent(inout)     :: scal3D
+    ! Other local variables
+    integer                                       :: i,j          ! index of the current mesh point
+    integer, dimension(2)                         :: ind_group    ! index of the current group of lines (=(i,k) by default)
+    real(WP), dimension(mesh_sc%N_proc(line_dir),gs(1),gs(2))  :: p_pos_adim ! adimensionned particles position
+    real(WP), dimension(mesh_sc%N_proc(line_dir)+1,gs(1),gs(2)):: p_V        ! particles velocity
+
+    ind_group = 0
+
+    do j = 1, mesh_sc%N_proc(gp_dir2), gs(2)
+        ind_group(2) = ind_group(2) + 1
+        ind_group(1) = 0
+        do i = 1, mesh_sc%N_proc(gp_dir1), gs(1)
+            ind_group(1) = ind_group(1) + 1
+
+            ! ===== Init particles =====
+            call AC_get_p_pos_adim(p_V, V_fine, 0.5_WP*dt, &
+                  & mesh_sc%dx(line_dir), mesh_V%dx(line_dir), mesh_sc%N_proc(line_dir), i, j)
+            ! p_V = position at the middle point for the RK2 scheme
+
+            ! ===== Advection =====
+            ! -- Compute velocity (with a RK2 scheme) --
+            call AC_interpol_plus(line_dir, gs, ind_group, i, j, V_coarse, p_V)
+            ! p_v = velocity at middle point position
+            ! -- Push particles --
+            call AC_get_p_pos_adim(p_pos_adim, p_V, dt, mesh_sc%dx(line_dir), mesh_sc%N_proc(line_dir))
+            ! Now p_pos = particle position and p_V = particle velocity
+
+            ! ===== Remeshing =====
+            call advec_remesh(line_dir, ind_group, gs, p_pos_adim, p_V, i,j,scal3D, dt)
+
+        end do
+    end do
+
+end subroutine advec_1D_Vcoarse
+
+
+!> ===== Private procedure =====
+end module advec
+!> @}
diff --git a/HySoP/src/scalesReduced/particles/advecX.f90 b/HySoP/src/scalesReduced/particles/advecX.f90
new file mode 100644
index 0000000000000000000000000000000000000000..72b641ed0794760677bf8a2d541f40d48a4e647c
--- /dev/null
+++ b/HySoP/src/scalesReduced/particles/advecX.f90
@@ -0,0 +1,780 @@
+!USEFORTEST advec
+!> @addtogroup part
+!! @{
+
+!------------------------------------------------------------------------------
+!
+! MODULE: advecX
+!
+!
+! DESCRIPTION:
+!> The module advecX is devoted to the advection of a scalar field along the X axis.
+!! It uses a particle method and provides a parallel implementation.
+!
+!> @details
+!! This module is a part of the advection solver based on the particle method.
+!! The solver uses dimensional splitting and this module contains all the
+!! methods used to solve advection along the X-axis. This is a parallel
+!! implementation using MPI and the cartesian topology it provides.
+!!
+!! This module can use the method and variables defined in the module
+!! "advec_common" which gather information and tools shared for advection along
+!! x, y and z-axis.
+!!
+!! The module "test_advec" can be used in order to validate the procedures
+!! embedded in this module.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advecX
+
+    use precision_tools
+    use advec_abstract_proc
+    implicit none
+
+    ! ===== Public procedures =====
+    public  :: advecX_init_group ! initialisation for a group of lines of particles
+    public  :: advecX_remesh_buffer_to_scalar
+
+    ! -- Init remeshing context --
+    public  :: advecX_remesh_init
+
+    ! -- Remeshing algorithm --
+    public  :: advecX_remesh_type_no_com
+    public  :: advecX_remesh_limited_no_com
+    public  :: advecX_remesh_no_type_no_com
+    public  :: advecX_remesh_in_buffer_lambda
+    public  :: advecX_remesh_in_buffer_limit_lambda
+    public  :: advecX_remesh_in_buffer_Mprime
+
+    ! ===== Private procedures =====
+    ! -- Compute limitator --
+    public :: advecX_limitator_group
+    !private:: advecX_limitator_group_no_com
+
+    procedure(advecX_remesh_type_no_com), pointer, public   ::  advecX_remesh_no_com => null()
+
+    ! ===== Private variable ====
+    !> Current direction = along X
+    integer, private, parameter     :: direction=1
+
+contains
+
+! #####################################################################################
+! #####                                                                           #####
+! #####                         Public procedure                                  #####
+! #####                                                                           #####
+! #####################################################################################
+
+
+! =============================================================
+! ====================   Remeshing tools   ====================
+! =============================================================
+
+subroutine advecX_remesh_init()
+
+    use advec_variables         ! solver context
+
+    select case(trim(type_solv))
+    case ('p_M8')
+        advecX_remesh_no_com => advecX_remesh_no_type_no_com
+    case ('p_M6')
+        !advecX_remesh_com => advecX_remesh_in_buffer_Mprime6
+        advecX_remesh_no_com => advecX_remesh_no_type_no_com
+    case ('p_M4')
+        !advecX_remesh_com => advecX_remesh_in_buffer_Mprime6
+        advecX_remesh_no_com => advecX_remesh_no_type_no_com
+    case ('d_M4')
+        !advecX_remesh_com => advecX_remesh_in_buffer_Mprime6
+        advecX_remesh_no_com => advecX_remesh_no_type_no_com
+    case ('p_L2')
+        !advecX_remesh_com => advecX_remesh_in_buffer_limited
+        advecX_remesh_no_com => advecX_remesh_limited_no_com
+    case ('p_44') ! Lambda 4_4
+        advecX_remesh_no_com => advecX_remesh_no_type_no_com
+    case ('p_64') ! Lambda 6_4
+        advecX_remesh_no_com => advecX_remesh_no_type_no_com
+    case ('p_66') ! Lambda 6_6
+        advecX_remesh_no_com => advecX_remesh_no_type_no_com
+    case ('p_84') ! Lambda 8_4
+        advecX_remesh_no_com => advecX_remesh_no_type_no_com
+    ! To ensure backward compatibility
+    case ('p_L4')
+        advecX_remesh_no_com => advecX_remesh_no_type_no_com
+    case ('p_L6')
+        advecX_remesh_no_com => advecX_remesh_no_type_no_com
+    ! Default value
+    case default
+        !advecX_remesh_com => advecX_remesh_in_buffer_gp
+        advecX_remesh_no_com => advecX_remesh_type_no_com
+    end select
+
+end subroutine advecX_remesh_init
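+
+! Usage sketch (illustrative): advec_init calls advecX_remesh_init once, after which
+! the pointer can be used exactly like the specific routines it may point to, e.g.
+!   call advecX_remesh_no_com(ind_group, gs, p_pos_adim, p_V, j, k, scal, dt)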
+
+
+!> Remesh particles inside a buffer. Uses corrected lambda remeshing polynomials.
+!! @author Jean-Baptiste Lagaert
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y, 3 = along Z)
+!!    @param[in]        gs          = size of the group of lines along the current direction
+!!    @param[in]        j,k         = indices of the current line (y- and z-coordinates)
+!!    @param[in]        ind_min     = the indices of the original array "pos_in_buffer" do not start from 1.
+!!                                    They actually start from ind_min and, to avoid out-of-range accesses,
+!!                                    a shift of (-ind_min) will be applied to each index of "pos_in_buffer".
+!!    @param[in]        send_min    = minimal index of the mesh involved in remeshing particles (of the particles in my local subdomain)
+!!    @param[in]        send_max    = maximal index of the mesh involved in remeshing particles (of the particles in my local subdomain)
+!!    @param[in]        scalar      = the initial scalar field transported by particles
+!!    @param[out]       buffer      = buffer where particles are remeshed
+subroutine advecX_remesh_in_buffer_lambda(gs, j, k, ind_min, p_pos_adim, bl_type, bl_tag, send_min, send_max, &
+        & scalar, buffer, pos_in_buffer)
+
+    use cart_topology           ! Description of mesh and of mpi topology
+    use advec_variables         ! contains info about solver parameters and others.
+    use advec_abstract_proc     ! profile of generic procedure
+    use advec_remeshing_lambda  ! remeshing formula and wrapper for a line of particles
+
+    ! Input/Output
+    integer, dimension(2), intent(in)                   :: gs
+    integer, intent(in)                                 :: j, k
+    integer, intent(in)                                 :: ind_min
+    real(WP), dimension(:,:,:), intent(in)              :: p_pos_adim   ! adimensionned particles position
+    logical, dimension(:,:,:), intent(in)               :: bl_type      ! is the particle block a center block or a left one ?
+    logical, dimension(:,:,:), intent(in)               :: bl_tag       ! indice of tagged particles
+    integer, dimension(:,:), intent(in)                 :: send_min     ! distance between me and the process which sends me information
+    integer, dimension(:,:), intent(in)                 :: send_max     ! distance between me and the process which sends me information
+    real(WP), dimension(:,:,:), intent(inout)           :: scalar       ! the initial scalar field transported by particles
+    real(WP),dimension(:), intent(out), target          :: buffer       ! buffer where particles are remeshed
+    integer, dimension(:), intent(inout)                :: pos_in_buffer! describes how the one-dimensional array "buffer" is split
+                                                                        ! into parts corresponding to different processes
+
+    ! Other local variables
+    integer                                 :: proc_gap     ! distance between my (mpi) coordinate and the coordinate of the remote process
+    type(real_pter),dimension(:),allocatable:: remeshX_pter  ! pointers into the send buffer, in which scalar values are
+                                                            ! sorted by line index and by receiver
+    integer                                 :: i1, i2       ! index of a line within the group
+    integer                                 :: ind          ! index of the current particle inside the current line.
+
+    ! ===== Remeshing into the buffer by using pointer array =====
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            send_j_min = send_min(i1,i2)
+            send_j_max = send_max(i1,i2)
+
+            ! -- Allocate remeshX_pter --
+            allocate(remeshX_pter(send_j_min:send_j_max))
+            do ind = send_j_min, send_j_max
+                proc_gap = floor(real(ind-1, WP)/mesh_sc%N_proc(direction)) - (ind_min-1)
+                remeshX_pter(ind)%pter => buffer(pos_in_buffer(proc_gap))
+                pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
+            end do
+
+            ! -- Remesh the particles in the buffer --
+            call AC_remesh_lambda_pter(direction, p_pos_adim(:,i1,i2), scalar(:,j+i1-1,k+i2-1), &
+                & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, remeshX_pter)
+
+            deallocate(remeshX_pter)
+        end do
+    end do
+
+    ! The scalar field must be reset to zero before the remeshing ends
+    scalar(:,j:j+gs(1)-1,k:k+gs(2)-1) = 0
+
+end subroutine advecX_remesh_in_buffer_lambda
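+
+! Indexing sketch (illustrative numbers): with mesh_sc%N_proc(direction) = 32 and
+! ind_min = 1, a target mesh point ind = 70 gives proc_gap = floor(69./32.) - 0 = 2,
+! so the contribution is written at buffer(pos_in_buffer(2)), the next free slot of
+! the slice reserved for the process two ranks away, and that slot counter advances.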
+
+
+!> Remesh particles inside a buffer. Uses corrected lambda remeshing polynomials.
+!! @author Jean-Baptiste Lagaert
+!!    @param[in]        gs          = size of the group of lines along the current direction
+!!    @param[in]        j,k         = indices of the current line (y- and z-coordinates)
+!!    @param[in]        ind_min     = the indices of the original array "pos_in_buffer" do not start from 1.
+!!                                    They actually start from ind_min and, to avoid out-of-range accesses,
+!!                                    a shift of (-ind_min) will be applied to each index of "pos_in_buffer".
+!!    @param[in]        p_pos_adim  = adimensionned particle positions
+!!    @param[in]        bl_type     = table of block types (center or left)
+!!    @param[in]        bl_tag      = informs about tagged particles (bl_tag(ind_bl)=1 if the end of the ind_bl-th block
+!!                                    and the beginning of the following one are tagged)
+!!    @param[in]        limit       = limitator function
+!!    @param[in]        send_min    = minimal index of the mesh involved in remeshing particles (of the particles in my local subdomain)
+!!    @param[in]        send_max    = maximal index of the mesh involved in remeshing particles (of the particles in my local subdomain)
+!!    @param[in]        scalar      = the initial scalar field transported by particles
+!!    @param[out]       buffer      = buffer where particles are remeshed
+!!    @param[in,out]    pos_in_buffer   = information about where to remesh the particles inside the buffer
+subroutine advecX_remesh_in_buffer_limit_lambda(gs, j, k, ind_min, p_pos_adim, bl_type, bl_tag, limit,  &
+        & send_min, send_max, scalar, buffer, pos_in_buffer)
+
+    use cart_topology           ! Description of mesh and of mpi topology
+    use advec_variables         ! contains info about solver parameters and others.
+    use advec_abstract_proc     ! profile of generic procedure
+    use advec_remeshing_lambda  ! remeshing formula and wrapper for a line of particles
+
+    ! Input/Output
+    integer, dimension(2), intent(in)                   :: gs
+    integer, intent(in)                                 :: j, k
+    integer, intent(in)                                 :: ind_min
+    real(WP), dimension(:,:,:), intent(in)              :: p_pos_adim   ! adimensionned particles position
+    logical, dimension(:,:,:), intent(in)               :: bl_type      ! is the particle block a center block or a left one ?
+    logical, dimension(:,:,:), intent(in)               :: bl_tag       ! indice of tagged particles
+    real(WP), dimension(:,:,:), intent(in)              :: limit        ! limitator function (divided by 8)
+    integer, dimension(:,:), intent(in)                 :: send_min     ! distance between me and the process which sends me information
+    integer, dimension(:,:), intent(in)                 :: send_max     ! distance between me and the process which sends me information
+    real(WP), dimension(:,:,:), intent(inout)           :: scalar       ! the initial scalar field transported by particles
+    real(WP),dimension(:), intent(out), target          :: buffer       ! buffer where particles are remeshed
+    integer, dimension(:), intent(inout)                :: pos_in_buffer! describes how the one-dimensional array "buffer" is split
+                                                                        ! into parts corresponding to different processes
+
+    ! Other local variables
+    integer                                 :: proc_gap     ! distance between my (mpi) coordinate and the coordinate of the remote process
+    type(real_pter),dimension(:),allocatable:: remeshX_pter  ! pointers into the send buffer, in which scalar values are
+                                                            ! sorted by line index and by receiver
+    integer                                 :: i1, i2       ! index of a line within the group
+    integer                                 :: ind          ! index of the current particle inside the current line.
+
+    ! ===== Remeshing into the buffer by using pointer array =====
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            send_j_min = send_min(i1,i2)
+            send_j_max = send_max(i1,i2)
+
+            ! -- Allocate remeshX_pter --
+            allocate(remeshX_pter(send_j_min:send_j_max))
+            do ind = send_j_min, send_j_max
+                proc_gap = floor(real(ind-1, WP)/mesh_sc%N_proc(direction)) - (ind_min-1)
+                remeshX_pter(ind)%pter => buffer(pos_in_buffer(proc_gap))
+                pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
+            end do
+
+            ! -- Remesh the particles in the buffer --
+            call AC_remesh_lambda2limited_pter(direction, p_pos_adim(:,i1,i2), scalar(:,j+i1-1,k+i2-1), &
+                & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, limit(:,i1,i2), remeshX_pter)
+
+            deallocate(remeshX_pter)
+        end do
+    end do
+
+    ! The scalar field must be reset to zero before the remeshing ends
+    scalar(:,j:j+gs(1)-1,k:k+gs(2)-1) = 0
+
+end subroutine advecX_remesh_in_buffer_limit_lambda
+
+
+!> Remesh particles inside a buffer - for the M'6 or M'8 remeshing formula.
+!! @author Jean-Baptiste Lagaert
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y, 3 = along Z)
+!!    @param[in]        gs          = size of the group of lines along the current direction
+!!    @param[in]        j,k         = indices of the current line (y- and z-coordinates)
+!!    @param[in]        ind_min     = the indices of the original array "pos_in_buffer" do not start from 1.
+!!                                    They actually start from ind_min and, to avoid out-of-range accesses,
+!!                                    a shift of (-ind_min) will be applied to each index of "pos_in_buffer".
+!!    @param[in]        send_min    = minimal index of the mesh involved in remeshing particles (of the particles in my local subdomain)
+!!    @param[in]        send_max    = maximal index of the mesh involved in remeshing particles (of the particles in my local subdomain)
+!!    @param[in]        scalar      = the initial scalar field transported by particles
+!!    @param[out]       buffer      = buffer where particles are remeshed
+subroutine advecX_remesh_in_buffer_Mprime(gs, j, k, ind_min, p_pos_adim, send_min, send_max, &
+        & scalar, buffer, pos_in_buffer)
+
+    use cart_topology     ! Description of mesh and of mpi topology
+    use advec_variables         ! contains info about solver parameters and others.
+    use advec_abstract_proc     ! profile of generic procedure
+    use advec_remeshing_Mprime  ! remeshing formula and wrapper for a line of particles
+
+    ! Input/Output
+    integer, dimension(2), intent(in)                   :: gs
+    integer, intent(in)                                 :: j, k
+    integer, intent(in)                                 :: ind_min
+    real(WP), dimension(:,:,:), intent(in)              :: p_pos_adim   ! adimensionned particles position
+    integer, dimension(:,:), intent(in)                 :: send_min     ! distance between me and the process which sends me information
+    integer, dimension(:,:), intent(in)                 :: send_max     ! distance between me and the process which sends me information
+    real(WP), dimension(:,:,:), intent(inout)           :: scalar       ! the initial scalar field transported by particles
+    real(WP),dimension(:), intent(out), target          :: buffer       ! buffer where particles are remeshed
+    integer, dimension(:), intent(inout)                :: pos_in_buffer! describes how the one-dimensional array "buffer" is split
+                                                                        ! into parts corresponding to different processes
+
+    ! Other local variables
+    integer                                 :: proc_gap     ! distance between my (mpi) coordinate and the coordinate of the remote process
+    type(real_pter),dimension(:),allocatable:: remeshX_pter  ! pointers into the send buffer, in which scalar values are
+                                                            ! sorted by line index and by receiver
+    integer                                 :: i1, i2       ! index of a line within the group
+    integer                                 :: ind          ! index of the current particle inside the current line.
+    !! real(WP), dimension(mesh_sc%N_proc(direction))  :: pos_translat ! translation of p_pos_adim as array indices
+    !!                                                        ! now start from 1 and not from ind_min
+
+
+    ! ===== Remeshing into the buffer by using pointer array =====
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+
+            ! -- Allocate remeshX_pter --
+            allocate(remeshX_pter(send_min(i1,i2):send_max(i1,i2)))
+            do ind = send_min(i1,i2), send_max(i1,i2)
+                proc_gap = floor(real(ind-1, WP)/mesh_sc%N_proc(direction)) - (ind_min-1)
+                remeshX_pter(ind)%pter => buffer(pos_in_buffer(proc_gap))
+                pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
+            end do
+
+            !! pos_translat = p_pos_adim(:,i1,i2) - send_min(i1,i2) + 1
+            !! Index translation is performed in the AC_remesh_Mprime_pter subroutine on the
+            !! integer adimensionned particle position instead of here on the float position
+
+            ! -- Remesh the particles in the buffer --
+            do ind = 1, mesh_sc%N_proc(direction)
+                call AC_remesh_Mprime_pter(p_pos_adim(ind,i1,i2), 1-send_min(i1,i2), scalar(ind,j+i1-1,k+i2-1), remeshX_pter)
+            end do
+
+            deallocate(remeshX_pter)
+        end do
+    end do
+
+    ! The scalar field must be reset to zero before the remeshing ends
+    scalar(:,j:j+gs(1)-1,k:k+gs(2)-1) = 0
+
+end subroutine advecX_remesh_in_buffer_Mprime
+
+
+!> Remesh particles inside a buffer - for corrected lambda 2 or lambda 4 - no communication variant
+!! @author Jean-Baptiste Lagaert
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y, 3 = along Z)
+!!    @param[in]        gs          = size of the group of lines along the current direction
+!!    @param[in]        j,k         = indices of the current line (y- and z-coordinates)
+!!    @param[in]        p_pos_adim  = adimensionned particle positions
+!!    @param[in]        dt          = time step (needed for tag and type)
+!!    @param[in]        p_V         = particle velocity to compute block type and tag
+!!    @param[in,out]    scal        = scalar field to advect
+subroutine advecX_remesh_type_no_com(ind_group, gs, p_pos_adim, p_V, j, k , scal, dt)
+
+    use cart_topology           ! Description of mesh and of mpi topology
+    use advec_variables         ! contains info about solver parameters and others.
+    use advec_correction        ! Some procedures common to advection along all directions
+    use advec_remeshing_lambda  ! remeshing formula and wrapper for a line of particles
+
+    ! Input/Output
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: j, k
+    real(WP),dimension(:,:,:),intent(inout)     :: scal
+    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! adimensionned particles position
+    real(WP), dimension(:,:,:), intent(inout)   :: p_V          ! particles velocity
+    real(WP), intent(in)                        :: dt
+
+    ! Other local variables
+    integer                                             :: i1, i2       ! position inside the group of lines
+    ! Type and block
+    logical, dimension(bl_nb(direction)+1,gs(1),gs(2))  :: bl_type      ! is the particle block a center block or a left one ?
+    logical, dimension(bl_nb(direction),gs(1),gs(2))    :: bl_tag       ! indice of tagged particles
+    integer                                 :: N_loc
+
+    N_loc = size(scal,1)
+
+    ! ===== Pre-Remeshing: Determine blocks type and tag particles =====
+    call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
+
+    ! ===== Initialize the general buffer =====
+
+    do i1 = 1, gs(1)
+        do i2 = 1, gs(2)
+
+            ! -- [re-] init buffer --
+            !remesh_buffer = 0
+            p_V(:,1,1) = 0
+
+            ! -- Remesh the particles in the buffer --
+            call AC_remesh_lambda_array(direction, p_pos_adim(:,i1,i2), &
+                & scal(:,j+i1-1,k+i2-1), bl_type(:,i1,i2), bl_tag(:,i1,i2), p_V(:,1,1))
+                !& scal(:,j+i1-1,k+i2-1), bl_type(:,i1,i2), bl_tag(:,i1,i2), remesh_buffer)
+
+            ! -- Update scalar from buffer --
+            !scal(:,i1+j-1,k+i2-1) = remesh_buffer
+            scal(:,i1+j-1,k+i2-1) = p_V(1:N_loc,1,1)
+        end do
+    end do
+
+end subroutine advecX_remesh_type_no_com
+
+
+!> Remesh particles inside a buffer - for limited and corrected lambda 2 - no communication variant
+!! @author Jean-Baptiste Lagaert
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y, 3 = along Z)
+!!    @param[in]        gs          = size of the group of lines along the current direction
+!!    @param[in]        j,k         = indices of the current line (y- and z-coordinates)
+!!    @param[in]        p_pos_adim  = adimensionned particle positions
+!!    @param[in]        dt          = time step (needed for tag and type)
+!!    @param[in]        p_V         = particle velocity to compute block type and tag
+!!    @param[in,out]    scal        = scalar field to advect
+subroutine advecX_remesh_limited_no_com(ind_group, gs, p_pos_adim, p_V, j, k , scal, dt)
+
+    use cart_topology           ! Description of mesh and of mpi topology
+    use advec_variables         ! contains info about solver parameters and others.
+    use advec_correction        ! Some procedures common to advection along all directions
+    use advec_remeshing_lambda  ! remeshing formula and wrapper for a line of particles
+
+    ! Input/Output
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: j, k
+    real(WP),dimension(:,:,:),intent(inout)     :: scal
+    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! adimensionned particles position
+    real(WP), dimension(:,:,:), intent(inout)   :: p_V          ! particles velocity
+    real(WP), intent(in)                        :: dt
+
+    ! Other local variables
+    integer                                             :: i1, i2       ! position inside the group of lines
+    ! Type and block
+    logical, dimension(bl_nb(direction)+1,gs(1),gs(2))  :: bl_type      ! is the particle block a center block or a left one ?
+    logical, dimension(bl_nb(direction),gs(1),gs(2))    :: bl_tag       ! indice of tagged particles
+    real(WP), dimension(mesh_sc%N_proc(direction)+1,gs(1),gs(2)):: limit        ! limitator function (divided by 8.)
+    ! Variable used to remesh particles in a buffer
+    !real(WP),dimension(mesh_sc%N(direction))                               :: remesh_buffer! buffer use to remesh the scalar
+    integer                                 :: N_loc
+
+    N_loc = size(scal,1)
+
+    ! ===== Pre-Remeshing I: Determine blocks type and tag particles =====
+    call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
+
+    ! ===== Pre-Remeshing II: Compute the limitator function =====
+    ! Actually, this subroutine computes [limitator/8] as this is the fraction
+    ! which always appears in the remeshing polynomials.
+    call advecX_limitator_group_no_com(gs, j, k, p_pos_adim, scal, limit)
+
+    ! ===== Initialize the general buffer =====
+
+    do i1 = 1, gs(1)
+        do i2 = 1, gs(2)
+
+            ! -- [Re-]init the buffer (p_V(:,1,1) reused as scratch) --
+            p_V(:,1,1) = 0
+
+            ! -- Remesh the particles in the buffer --
+            call AC_remesh_lambda2limited_array(direction, p_pos_adim(:,i1,i2), &
+                & scal(:,j+i1-1,k+i2-1), bl_type(:,i1,i2), bl_tag(:,i1,i2), limit(:,i1,i2), p_V(:,1,1))
+
+            ! -- Update scalar from buffer --
+            scal(:,j+i1-1,k+i2-1) = p_V(1:N_loc,1,1)
+        end do
+    end do
+
+end subroutine advecX_remesh_limited_no_com
+
+
+!> Remesh particles inside a buffer - for M'6 or M'8 - no-communication variant
+!! @author Jean-Baptiste Lagaert
+!!    @param[in]        ind_group   = coordinates of the current group of lines
+!!    @param[in]        gs          = size of the group of lines along the current direction
+!!    @param[in]        j,k         = indices of the current line (Y- and Z-coordinates)
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        dt          = time step
+!!    @param[in,out]    p_V         = particle velocities (reused here as the remeshing buffer)
+!!    @param[in,out]    scalar      = scalar field advected by the particles
+subroutine advecX_remesh_no_type_no_com(ind_group, gs, p_pos_adim, p_V, j, k , scalar, dt)
+
+    use cart_topology           ! Description of mesh and of mpi topology
+    use advec_variables         ! contains info about solver parameters and others.
+    use advec_remeshing_Mprime  ! remeshing formula and wrapper for a line of particles
+
+    ! Input/Output
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: j, k
+    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! adimensionned particles position
+    real(WP), dimension(:,:,:), intent(inout)   :: scalar       ! the initial scalar field transported by particles
+    real(WP), dimension(:,:,:), intent(inout)   :: p_V          ! particles velocity
+    real(WP), intent(in)                        :: dt
+
+    ! Other local variables
+    integer                                 :: i1, i2       ! index of a line inside the group
+    integer                                 :: ind          ! index of the current particle inside the current line
+    integer                                 :: N_loc
+
+    N_loc = size(scalar,1)
+
+    ! ===== Remeshing into the buffer by using pointer array =====
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+
+            ! -- [Re-]init the buffer (p_V(:,1,1) reused as scratch) --
+            p_V(:,1,1) = 0
+
+            ! -- Remesh the particles in the buffer --
+            do ind = 1, mesh_sc%N_proc(direction)
+                call AC_remesh_Mprime_array(direction, p_pos_adim(ind,i1,i2), scalar(ind,j+i1-1,k+i2-1), p_V(:,1,1))
+            end do
+
+            ! -- Update scalar from buffer --
+            scalar(:,j+i1-1,k+i2-1) = p_V(1:N_loc,1,1)
+
+        end do
+    end do
+
+
+end subroutine advecX_remesh_no_type_no_com
+
+
+!> Update the scalar field with the scalar values stored in the buffer
+!!    @param[in]        gs          = size of the group of lines along the current direction
+!!    @param[in]        j,k         = Y- and Z-coordinates of the first line along X inside the current group of lines.
+!!    @param[in]        ind_proc    = algebraic distance between me and the process which sent me the buffer. Used to read the right cartography.
+!!    @param[in]        gap         = algebraic distance between my local indices and the local indices of the process which sent me the buffer.
+!!    @param[in]        begin_i1    = index of the first place in the cartography array where the indices
+!!                                      along the direction of the group of lines are stored.
+!!    @param[in]        cartography = cartography(proc_gap) contains the set of line indices in the block for which the
+!!                                    current process requires data from proc_gap and, for each of these lines, the range
+!!                                    of mesh points for which it requires the values.
+!!    @param[in]        buffer      = buffer containing the data to redistribute into the scalar field.
+!!    @param[in,out]    scalar      = scalar field (to update)
+!!    @param[in,out]    beg_buffer  = first index inside the current cartography where mesh indices are stored; tells where to start reading data from the buffer.
+subroutine advecX_remesh_buffer_to_scalar(gs, j, k, ind_proc, gap, begin_i1, cartography, buffer, scalar, beg_buffer)
+
+    ! Input/Output
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: j, k
+    integer, intent(in)                         :: ind_proc     ! to read the right cartography associated with the process which sent me the buffer
+    integer,intent(in)                          :: gap          ! gap between my local indices and the local indices of another process
+    integer, intent(in)                         :: begin_i1     ! index of the first place in the cartography array where the
+                                                                ! indices along the direction of the group of lines are stored
+    integer, dimension(:,:), intent(in)         :: cartography
+    real(WP),dimension(:), intent(in)           :: buffer       ! buffer containing the data to redistribute into the local scalar field
+    real(WP), dimension(:,:,:), intent(inout)   :: scalar       ! the scalar field
+    integer, intent(inout)                      :: beg_buffer   ! first index where the scalar values of the current sender process are
+                                                                ! stored in the buffer; tells where to start reading
+
+    ! Other local variables
+    integer         :: i1, i2       ! index of a line inside the group
+    integer         :: ind_for_i1   ! where to read the first coordinate (i1) of the current line inside the cartography
+    integer         :: ind_i1_range ! to know where to read the i1 ranges of the current line inside the cartography
+    integer         :: ind_1Dtable  ! index of my current position inside a one-dimensional table
+    ! Where to read data in the buffer and where to write it in the scalar field:
+    integer         :: end_buffer   ! last index where the scalar values of the current sender process are stored in the buffer
+    integer         :: beg_sca      ! first index where the scalar values have to be written in the scalar field
+    integer         :: end_sca      ! last index where the scalar values have to be written in the scalar field
+
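+    ! Sketch of the traversal below (assuming the cartography layout described
+    ! in the header): for each second coordinate i2, the next
+    ! cartography(2+i2,ind_proc) elements encode (first_i1, last_i1) pairs of
+    ! line ranges; for every line in such a range, a pair (beg_sca, end_sca)
+    ! read via ind_1Dtable gives the mesh points to update, and the matching
+    ! values are read sequentially from "buffer" starting at beg_buffer.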
+    ! Use the cartography to know which lines are concerned
+    ind_1Dtable = cartography(2,ind_proc) ! carto(2) = number of elements used to store the i1 and i2 indices
+    ! Position in cartography(:,ind_proc) of the current i1 index
+    ind_i1_range = begin_i1
+    do i2 = 1, gs(2)
+        do ind_for_i1 = ind_i1_range+1, ind_i1_range + cartography(2+i2,ind_proc), 2
+            do i1 = cartography(ind_for_i1,ind_proc), cartography(ind_for_i1+1,ind_proc)
+                beg_sca = cartography(ind_1Dtable+1,ind_proc)+gap
+                end_sca = cartography(ind_1Dtable+2,ind_proc)+gap
+                end_buffer = beg_buffer + end_sca - beg_sca
+                scalar(beg_sca:end_sca,j+i1-1,k+i2-1) = scalar(beg_sca:end_sca,j+i1-1,k+i2-1) &
+                    & + buffer(beg_buffer:end_buffer)
+                beg_buffer = end_buffer + 1
+                ind_1Dtable = ind_1Dtable + 2
+            end do
+        end do
+        ind_i1_range = ind_i1_range + cartography(2+i2,ind_proc)
+    end do
+
+end subroutine advecX_remesh_buffer_to_scalar
+
+
+! ====================================================================
+! ====================    Initialize particle     ====================
+! ====================================================================
+
+!> Creation and initialisation of a group of particle lines
+!!    @param[in]    Vx          = 3D velocity field
+!!    @param[in]    j           = Y-indice of the current line
+!!    @param[in]    k           = Z-indice of the current line
+!!    @param[in]    Gsize       = size of groups (along X direction)
+!!    @param[out]   p_V         = particle velocity
+subroutine advecX_init_group(Vx, j, k, Gsize, p_V)
+
+    use cart_topology   ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    integer, intent(in)                       :: j,k
+    integer, dimension(2), intent(in)         :: Gsize
+    real(WP), dimension(:,:,:),intent(out)    ::  p_V
+    real(WP), dimension(:,:,:), intent(in)    :: Vx
+    ! Other local variables
+    integer                                   :: ind          ! index
+    integer                                   :: j_gp, k_gp   ! Y and Z indices of the current line in the group
+
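+    ! Each line (j_gp,k_gp) of the group receives the velocity of the X-line
+    ! starting at the mesh point (1, j+j_gp-1, k+k_gp-1).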
+    do k_gp = 1, Gsize(2)
+        do j_gp = 1, Gsize(1)
+            do ind = 1, mesh_sc%N_proc(direction)
+                p_V(ind, j_gp, k_gp)        = Vx(ind,j+j_gp-1,k+k_gp-1)
+            end do
+        end do
+    end do
+
+end subroutine advecX_init_group
+
+
+
+! ######################################################################################
+! #####                                                                            #####
+! #####                         Private procedure                                  #####
+! #####                                                                            #####
+! ######################################################################################
+
+! ==================================================================================================================================
+! ====================     Compute scalar slope for introducing limitator (against numerical oscillations)      ====================
+! ==================================================================================================================================
+
+!> Compute scalar slopes for introducing limitator
+!!    @param[in]        gp_s        = size of a group (ie number of line it gathers along the two other directions)
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        p_pos       = particles position
+!!    @param[in]        scalar      = scalar advected by particles
+!!    @param[out]       limit       = limitator function
+!! @details
+!!        This subroutine works on a group of lines. For each line of the group,
+!!    it computes the first-order scalar variations, exchanging ghost values at
+!!    the subdomain boundaries with the MPI neighbors, and deduces from them the
+!!    slope limiter used by the corrected remeshing formula.
+!!         Note that the subroutine actually computes limitator/8, as this is the
+!!    expression used inside the remeshing formula; computing it directly
+!!    minimizes the number of operations.
+subroutine advecX_limitator_group(gp_s, ind_group, j, k, p_pos, &
+                & scalar, limit)
+
+    use cart_topology   ! info about mesh and mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+    use advec_correction! contains limitator computation
+    use precision_tools       ! define working precision_tools (double or simple)
+
+    integer, dimension(2),intent(in)                            :: gp_s         ! group size
+    integer, dimension(2), intent(in)                           :: ind_group    ! group index
+    integer , intent(in)                                        :: j,k          ! block coordinates
+    real(WP), dimension(:,:,:), intent(in)                      :: p_pos        ! particle position
+    real(WP), dimension(:,:,:), intent(in)                      :: scalar       ! scalar field to advect
+    real(WP), dimension(:,:,:), intent(out)                     :: limit        ! limitator function
+
+    ! Local variables
+    real(WP),dimension(2,gp_s(1),gp_s(2))                       :: Sbuffer, Rbuffer ! buffer to exchange scalar or limitator at boundaries with neighbors.
+    real(WP),dimension(gp_s(1),gp_s(2),mesh_sc%N_proc(direction)+1)     :: deltaS      ! first order scalar variation
+    integer                                                     :: ind          ! loop indice on particle indice
+    integer                                                     :: send_request ! mpi status of nonblocking send
+    integer                                                     :: rece_request ! mpi status of nonblocking receive
+    integer, dimension(MPI_STATUS_SIZE)                         :: rece_status  ! mpi status (for mpi_wait)
+    integer, dimension(MPI_STATUS_SIZE)                         :: send_status  ! mpi status (for mpi_wait)
+    integer, dimension(2)                                       :: tag_table    ! other tags for mpi message
+    integer                                                     :: com_size     ! size of mpi message
+    integer                                                     :: ierr         ! mpi error code
+
+    ! ===== Initialisation =====
+    com_size = 2*gp_s(1)*gp_s(2)
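+    ! (two ghost values per line, for the gp_s(1)*gp_s(2) lines of the group)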
+
+    ! ===== Exchange ghost =====
+    ! Receive the ghost values, i.e. the boundary values from the neighbor.
+    tag_table = compute_tag(ind_group, tag_part_slope, direction)
+    call mpi_Irecv(Rbuffer(1,1,1), com_size, MPI_REAL_WP, &
+            & neighbors(direction,1), tag_table(1), D_comm(direction), rece_request, ierr)
+    ! Send the first two scalar values of each line as ghosts
+    Sbuffer = scalar(1:2,j:j+gp_s(1)-1,k:k+gp_s(2)-1)
+    call mpi_ISsend(Sbuffer(1,1,1), com_size, MPI_REAL_WP, &
+            & neighbors(direction,-1), tag_table(1), D_comm(direction), send_request, ierr)
+
+    ! ===== Compute scalar variation =====
+    ! -- For the "middle" block --
+    do ind = 1, mesh_sc%N_proc(direction)-1
+        deltaS(:,:,ind) = scalar(ind+1,j:j+gp_s(1)-1,k:k+gp_s(2)-1) - scalar(ind,j:j+gp_s(1)-1,k:k+gp_s(2)-1)
+    end do
+    ! -- For the last elements of each line --
+    ! Check reception
+    call mpi_wait(rece_request, rece_status, ierr)
+    ! Compute delta (after the loop above, ind = mesh_sc%N_proc(direction))
+    deltaS(:,:,mesh_sc%N_proc(direction)) = Rbuffer(1,:,:) - scalar(ind,j:j+gp_s(1)-1,k:k+gp_s(2)-1)   ! scalar(N+1) - scalar(N)
+    deltaS(:,:,mesh_sc%N_proc(direction)+1) = Rbuffer(2,:,:) - Rbuffer(1,:,:)   ! scalar(N+2) - scalar(N+1)
+
+
+    ! ===== Compute slope and limitator =====
+    call AC_limitator_from_slopes(direction, gp_s, p_pos, deltaS,   &
+            & limit, tag_table(2), com_size)
+
+    ! ===== Close mpi_ISsend when done =====
+    call mpi_wait(send_request, send_status, ierr)
+
+end subroutine advecX_limitator_group
+
+
+!> Compute scalar slopes for introducing limitator - no communication variant
+!!(for topology without subdivision along X)
+!!    @param[in]        gp_s        = size of a group (ie number of line it gathers along the two other directions)
+!!    @param[in]        p_pos       = particles position
+!!    @param[in]        scalar      = scalar advected by particles
+!!    @param[out]       limit       = limitator function
+!! @details
+!!        This subroutine works on a group of lines. For each line of the group,
+!!    it computes the first-order scalar variations and deduces from them the
+!!    slope limiter used by the corrected remeshing formula. As the topology is
+!!    not subdivided along the current direction, the ghost values are obtained
+!!    periodically from the other end of the line, without any communication.
+!!         Note that the subroutine actually computes limitator/8, as this is the
+!!    expression used inside the remeshing formula; computing it directly
+!!    minimizes the number of operations.
+subroutine advecX_limitator_group_no_com(gp_s, j, k, p_pos, &
+                & scalar, limit)
+
+    use cart_topology   ! info about mesh and mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+    use precision_tools       ! define working precision_tools (double or simple)
+
+    integer, dimension(2),intent(in)                            :: gp_s         ! group size
+    integer , intent(in)                                        :: j,k          ! block coordinates
+    real(WP), dimension(:,:,:), intent(in)                      :: p_pos        ! particle position
+    real(WP), dimension(:,:,:), intent(in)                      :: scalar       ! scalar field to advect
+    real(WP), dimension(:,:,:), intent(out)                     :: limit        ! limitator function
+
+    ! Local variables
+    real(WP),dimension(gp_s(1),gp_s(2),0:mesh_sc%N_proc(direction)+1)   :: deltaS       ! first order scalar variation
+    integer                                                     :: ind          ! loop indice on particle indice
+    real(WP),dimension(gp_s(1),gp_s(2))                         :: afl          ! = cfl - [cfl] where [] denotes the nearest int.
+
+    ! ===== Compute scalar variation =====
+    ! -- For the "middle" block --
+    do ind = 1, mesh_sc%N_proc(direction)-1
+        deltaS(:,:,ind) = scalar(ind+1,j:j+gp_s(1)-1,k:k+gp_s(2)-1) - scalar(ind,j:j+gp_s(1)-1,k:k+gp_s(2)-1)
+    end do
+    ! -- For the first element of each line (periodic: no subdivision along X,
+    ! so mesh_sc%N(direction) = mesh_sc%N_proc(direction)) --
+    deltaS(:,:,0) = scalar(1,j:j+gp_s(1)-1,k:k+gp_s(2)-1) - scalar(mesh_sc%N(direction),j:j+gp_s(1)-1,k:k+gp_s(2)-1)   ! scalar(1) - scalar(0)
+    ! -- For the last elements of each line --
+    deltaS(:,:,mesh_sc%N_proc(direction))    = deltaS(:,:,0)   ! scalar(N+1) - scalar(N)
+    deltaS(:,:,mesh_sc%N_proc(direction)+1)  = deltaS(:,:,1)   ! scalar(N+2) - scalar(N+1)
+
+
+    ! ===== Compute slope and limitator =====
+    ! Note that limit = (limiter function)/8
+    ! Van Leer limiter
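+    ! In formula form (a sketch of what the loop below computes): with
+    !   afl = p_pos - nint(p_pos)  and  r = deltaS(ind-1)/deltaS(ind)
+    !   (or r = deltaS(ind+1)/deltaS(ind) when afl <= 0),
+    !   limit = (1/2) * min(0.9, (afl +- 1/2)**2) * max(0,r)/(1 + max(0,r))
+    ! and limit = 0 where deltaS(ind) = 0.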
+    do ind = 1, mesh_sc%N_proc(direction)
+        where(deltaS(:,:,ind)/=0)
+            afl = p_pos(ind,:,:)
+            afl = afl - nint(afl)
+            ! If (p_pos-nint(p_pos))>=0)
+            where(afl>0)
+                limit(ind+1,:,:) = max(0.0_WP,(deltaS(:,:,ind-1)/deltaS(:,:,ind)))
+                limit(ind+1,:,:) = limit(ind+1,:,:)/(limit(ind+1,:,:)+1)
+                limit(ind+1,:,:) = (4.0_WP/8._WP)*min(0.9_WP,(afl+0.5_WP)**2)*limit(ind+1,:,:)
+            elsewhere
+                limit(ind+1,:,:) = max(0.0_WP,(deltaS(:,:,ind+1)/deltaS(:,:,ind)))
+                limit(ind+1,:,:) = limit(ind+1,:,:)/(limit(ind+1,:,:)+1)
+                limit(ind+1,:,:) = (4.0_WP/8._WP)*min(0.9_WP,(afl-0.5_WP)**2)*limit(ind+1,:,:)
+            end where
+        elsewhere
+            limit(ind+1,:,:) = 0.0_WP
+        end where
+    end do
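+    ! Periodic wrap: the limiter at the first point reuses the value computed
+    ! at point N_proc+1.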
+    limit(1,:,:) = limit(mesh_sc%N_proc(direction)+1,:,:)
+    ! Classical (corrected) lambda formula: limitator function = 1
+    ! limit = 1._WP/8._WP
+
+end subroutine advecX_limitator_group_no_com
+
+
+
+end module advecX
+!> @}
diff --git a/HySoP/src/scalesReduced/particles/advecY.f90 b/HySoP/src/scalesReduced/particles/advecY.f90
new file mode 100644
index 0000000000000000000000000000000000000000..44f8a1b8909dad27ccff7356713fe00940588689
--- /dev/null
+++ b/HySoP/src/scalesReduced/particles/advecY.f90
@@ -0,0 +1,480 @@
+!USEFORTEST advec
+!> @addtogroup part
+!! @{
+
+!------------------------------------------------------------------------------
+!
+! MODULE: advecY
+!
+!
+! DESCRIPTION:
+!> The module advecY is devoted to the advection of a scalar field along the Y axis.
+!! It uses a particle method and provides a parallel implementation.
+!
+!> @details
+!!     This module is a part of the advection solver based on a particle method.
+!! The solver uses dimensional splitting, and this module contains all the
+!! methods used to solve advection along the Y-axis. This is a parallel
+!! implementation using MPI and the Cartesian topology it provides.
+!!
+!! This module can use the method and variables defined in the module
+!! "advec_common" which gather information and tools shared for advection along
+!! x, y and z-axis.
+!!
+!! The module "test_advec" can be used in order to validate the procedures
+!! embedded in this module.
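+!!
+!! For instance, one dimensional-splitting step advects the scalar successively
+!! along X, Y and Z, each one-dimensional advection being handled by the
+!! corresponding module (advecX, advecY, advecZ).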
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advecY
+
+    use precision_tools
+    use advec_abstract_proc
+
+    implicit none
+
+    ! ===== Public procedures =====
+    ! -- Init remeshing context --
+    public  :: advecY_init_group
+    ! -- Remeshing algorithm --
+    public  :: advecY_remesh_in_buffer_lambda
+    public  :: advecY_remesh_in_buffer_limit_lambda
+    public  :: advecY_remesh_in_buffer_Mprime
+    public  :: advecY_remesh_buffer_to_scalar
+
+    ! ===== Private procedures =====
+    ! -- Compute limitator --
+    public  :: advecY_limitator_group
+
+    ! ===== Private variables =====
+    !> current direction = along Y (to avoid redefinition and make cut/paste easier)
+    integer, parameter, private      :: direction = 2
+
+    interface advecY_init
+        module procedure advecY_init_group
+    end interface advecY_init
+
+contains
+
+
+! #####################################################################################
+! #####                                                                           #####
+! #####                         Public procedure                                  #####
+! #####                                                                           #####
+! #####################################################################################
+
+
+! ============================================================
+! ====================    Remeshing tools ====================
+! ============================================================
+
+!> Remesh particles inside a buffer. Uses corrected lambda remeshing polynomials.
+!! @author Jean-Baptiste Lagaert
+!!    @param[in]        gs          = size of the group of lines along the current direction
+!!    @param[in]        i,k         = X- and Z-coordinates of the first line of the current group of lines
+!!    @param[in]        ind_min     = the indices in "pos_in_buffer" do not start from 1 but from ind_min;
+!!                                    a gap of (-ind_min) is added to each index into "pos_in_buffer"
+!!                                    to avoid out-of-range accesses.
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        bl_type     = table of block types (center or left)
+!!    @param[in]        bl_tag      = flags for tagged particles
+!!    @param[in]        send_min    = minimal index of the mesh points involved in remeshing my local particles
+!!    @param[in]        send_max    = maximal index of the mesh points involved in remeshing my local particles
+!!    @param[in,out]    scalar      = the initial scalar field transported by the particles
+!!    @param[out]       buffer      = buffer where the particles are remeshed
+!!    @param[in,out]    pos_in_buffer = information about where to remesh the particles inside the buffer
+subroutine advecY_remesh_in_buffer_lambda(gs, i, k, ind_min, p_pos_adim, bl_type, bl_tag, send_min, send_max, &
+        & scalar, buffer, pos_in_buffer)
+
+    use cart_topology           ! Description of mesh and of mpi topology
+    use advec_variables         ! contains info about solver parameters and others.
+    use advec_abstract_proc     ! profile of generic procedure
+    use advec_remeshing_lambda  ! needed to remesh !!
+
+    ! Input/Output
+    integer, dimension(2), intent(in)                   :: gs
+    integer, intent(in)                                 :: i, k
+    integer, intent(in)                                 :: ind_min
+    real(WP), dimension(:,:,:), intent(in)              :: p_pos_adim   ! adimensionned particles position
+    logical, dimension(:,:,:), intent(in)               :: bl_type      ! is the particle block a center block or a left one ?
+    logical, dimension(:,:,:), intent(in)               :: bl_tag       ! indice of tagged particles
+    integer, dimension(:,:), intent(in)                 :: send_min     ! minimal index of the mesh points involved in remeshing my particles
+    integer, dimension(:,:), intent(in)                 :: send_max     ! maximal index of the mesh points involved in remeshing my particles
+    real(WP), dimension(:,:,:), intent(inout)           :: scalar       ! the initial scalar field transported by particles
+    real(WP),dimension(:), intent(out), target          :: buffer       ! buffer where particles are remeshed
+    integer, dimension(:), intent(inout)                :: pos_in_buffer! describes how the one-dimensional array "buffer" is split
+                                                                        ! into parts corresponding to the different processes
+
+    ! Other local variables
+    integer                                 :: proc_gap     ! distance between my (MPI) coordinates and those of the remote process
+    type(real_pter),dimension(:),allocatable:: remeshY_pter ! pointers into the send buffer, in which scalar values are
+                                                            ! sorted by line index and by receiver
+    integer                                 :: i1, i2       ! indice of a line into the group
+    integer                                 :: ind          ! indice of the current particle inside the current line.
+
+    ! ===== Remeshing into the buffer by using pointer array =====
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            send_j_min = send_min(i1,i2)
+            send_j_max = send_max(i1,i2)
+
+            ! -- Allocate remeshY_pter --
+            allocate(remeshY_pter(send_j_min:send_j_max))
+            do ind = send_j_min, send_j_max
+                proc_gap = floor(real(ind-1, WP)/mesh_sc%N_proc(direction)) - (ind_min-1)
+                remeshY_pter(ind)%pter => buffer(pos_in_buffer(proc_gap))
+                pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
+            end do
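+            ! Each mesh index "ind" is mapped to the process owning it:
+            ! proc_gap = floor((ind-1)/N_proc) - (ind_min-1), shifted so that
+            ! pos_in_buffer can be indexed from 1. For example (illustrative
+            ! values), with mesh_sc%N_proc(direction) = 16 and ind_min = 1,
+            ! ind = 20 gives proc_gap = 1.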
+
+            ! -- Remesh the particles in the buffer --
+            call AC_remesh_lambda_pter(direction, p_pos_adim(:,i1,i2), scalar(i+i1-1,:,k+i2-1), &
+                & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, remeshY_pter)
+
+            deallocate(remeshY_pter)
+        end do
+    end do
+
+    ! The scalar values are now stored in the buffer. The scalar field must
+    ! therefore be re-initialized to 0 before the buffer is redistributed into it.
+    scalar(i:i+gs(1)-1,:,k:k+gs(2)-1) = 0
+
+end subroutine advecY_remesh_in_buffer_lambda
+
+
+!> Remesh particles inside a buffer. Uses limited and corrected lambda remeshing polynomials.
+!! @author Jean-Baptiste Lagaert
+!!    @param[in]        gs          = size of the group of lines along the current direction
+!!    @param[in]        i,k         = X- and Z-coordinates of the first line of the current group of lines
+!!    @param[in]        ind_min     = the indices in "pos_in_buffer" do not start from 1 but from ind_min;
+!!                                    a gap of (-ind_min) is added to each index into "pos_in_buffer"
+!!                                    to avoid out-of-range accesses.
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        bl_type     = table of block types (center or left)
+!!    @param[in]        bl_tag      = inform about tagged particles (bl_tag(ind_bl)=1 if the end of the ind_bl-th block
+!!                                    and the beginning of the following one are tagged)
+!!    @param[in]        limit       = limiter function
+!!    @param[in]        send_min    = minimal index of the mesh points involved in remeshing my local particles
+!!    @param[in]        send_max    = maximal index of the mesh points involved in remeshing my local particles
+!!    @param[in,out]    scalar      = the initial scalar field transported by the particles
+!!    @param[out]       buffer      = buffer where the particles are remeshed
+!!    @param[in,out]    pos_in_buffer = information about where to remesh the particles inside the buffer
+subroutine advecY_remesh_in_buffer_limit_lambda(gs, i, k, ind_min, p_pos_adim, bl_type, bl_tag, limit, &
+        & send_min, send_max, scalar, buffer, pos_in_buffer)
+
+    use cart_topology           ! Description of mesh and of mpi topology
+    use advec_variables         ! contains info about solver parameters and others.
+    !use advec_abstract_proc     ! profile of generic procedure
+    use advec_remeshing_lambda  ! needed to remesh !!
+
+    ! Input/Output
+    integer, dimension(2), intent(in)                   :: gs
+    integer, intent(in)                                 :: i, k
+    integer, intent(in)                                 :: ind_min
+    real(WP), dimension(:,:,:), intent(in)              :: p_pos_adim   ! adimensionned particles position
+    logical, dimension(:,:,:), intent(in)               :: bl_type      ! is the particle block a center block or a left one ?
+    logical, dimension(:,:,:), intent(in)               :: bl_tag       ! indice of tagged particles
+    real(WP), dimension(:,:,:), intent(in)              :: limit        ! limiter function (divided by 8)
+    integer, dimension(:,:), intent(in)                 :: send_min     ! minimal index of the mesh points involved in remeshing my particles
+    integer, dimension(:,:), intent(in)                 :: send_max     ! maximal index of the mesh points involved in remeshing my particles
+    real(WP), dimension(:,:,:), intent(inout)           :: scalar       ! the initial scalar field transported by particles
+    real(WP),dimension(:), intent(out), target          :: buffer       ! buffer where particles are remeshed
+    integer, dimension(:), intent(inout)                :: pos_in_buffer! describes how the one-dimensional array "buffer" is split
+                                                                        ! into parts corresponding to the different processes
+
+    ! Other local variables
+    integer                                 :: proc_gap     ! distance between my (MPI) coordinates and those of the remote process
+    type(real_pter),dimension(:),allocatable:: remeshY_pter ! pointers into the send buffer, in which scalar values are
+                                                            ! sorted by line index and by receiver
+    integer                                 :: i1, i2       ! indice of a line into the group
+    integer                                 :: ind          ! indice of the current particle inside the current line.
+
+    ! ===== Remeshing into the buffer by using pointer array =====
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            send_j_min = send_min(i1,i2)
+            send_j_max = send_max(i1,i2)
+
+            ! -- Allocate remeshY_pter --
+            allocate(remeshY_pter(send_j_min:send_j_max))
+            do ind = send_j_min, send_j_max
+                proc_gap = floor(real(ind-1, WP)/mesh_sc%N_proc(direction)) - (ind_min-1)
+                remeshY_pter(ind)%pter => buffer(pos_in_buffer(proc_gap))
+                pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
+            end do
+
+            ! -- Remesh the particles in the buffer --
+            call AC_remesh_lambda2limited_pter(direction, p_pos_adim(:,i1,i2), scalar(i+i1-1,:,k+i2-1), &
+                & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, limit(:,i1,i2), remeshY_pter)
+
+            deallocate(remeshY_pter)
+        end do
+    end do
+
+    ! The scalar values are now stored in the buffer. The scalar field must
+    ! therefore be re-initialized to 0 before the buffer is redistributed into it.
+    scalar(i:i+gs(1)-1,:,k:k+gs(2)-1) = 0
+
+end subroutine advecY_remesh_in_buffer_limit_lambda
+
+
+!> Remesh particles inside a buffer - for M'6 or M'8 - direction = along Y
+!! @author Jean-Baptiste Lagaert
+!!    @param[in]        gs          = size of the group of lines along the current direction
+!!    @param[in]        i,k         = X- and Z-coordinates of the first line of the current group of lines
+!!    @param[in]        ind_min     = the indices in "pos_in_buffer" do not start from 1 but from ind_min;
+!!                                    a gap of (-ind_min) is added to each index into "pos_in_buffer"
+!!                                    to avoid out-of-range accesses.
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        send_min    = minimal index of the mesh points involved in remeshing my local particles
+!!    @param[in]        send_max    = maximal index of the mesh points involved in remeshing my local particles
+!!    @param[in,out]    scalar      = the initial scalar field transported by the particles
+!!    @param[out]       buffer      = buffer where the particles are remeshed
+!!    @param[in,out]    pos_in_buffer = information about where to remesh the particles inside the buffer
+subroutine advecY_remesh_in_buffer_Mprime(gs, i, k, ind_min, p_pos_adim, send_min, send_max, &
+        & scalar, buffer, pos_in_buffer)
+
+    use cart_topology           ! Description of mesh and of mpi topology
+    use advec_variables         ! contains info about solver parameters and others.
+    use advec_abstract_proc     ! profile of generic procedure
+    use advec_remeshing_Mprime  ! remeshing formula and wrapper for a line of particles
+
+    ! Input/Output
+    integer, dimension(2), intent(in)                   :: gs
+    integer, intent(in)                                 :: i, k
+    integer, intent(in)                                 :: ind_min
+    real(WP), dimension(:,:,:), intent(in)              :: p_pos_adim   ! adimensionned particles position
+    integer, dimension(:,:), intent(in)                 :: send_min     ! minimal index of the mesh points involved in remeshing my particles
+    integer, dimension(:,:), intent(in)                 :: send_max     ! maximal index of the mesh points involved in remeshing my particles
+    real(WP), dimension(:,:,:), intent(inout)           :: scalar       ! the initial scalar field transported by particles
+    real(WP),dimension(:), intent(out), target          :: buffer       ! buffer where particles are remeshed
+    integer, dimension(:), intent(inout)                :: pos_in_buffer! describes how the one-dimensional array "buffer" is split
+                                                                        ! into parts corresponding to the different processes
+
+    ! Other local variables
+    integer                                 :: proc_gap     ! distance between my (MPI) coordinates and those of the remote process
+    type(real_pter),dimension(:),allocatable:: remeshY_pter ! pointers into the send buffer, in which scalar values are
+                                                            ! sorted by line index and by receiver
+    integer                                 :: i1, i2       ! indice of a line into the group
+    integer                                 :: ind          ! indice of the current particle inside the current line.
+    !! real(WP), dimension(mesh_sc%N_proc(direction))  :: pos_translat ! translation of p_pos_adim as array indice
+    !!                                                        ! are now starting from 1 and not ind_min
+
+
+    ! ===== Remeshing into the buffer by using pointer array =====
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+
+            ! -- Allocate remeshY_pter --
+            allocate(remeshY_pter(send_min(i1,i2):send_max(i1,i2)))
+            do ind = send_min(i1,i2), send_max(i1,i2)
+                proc_gap = floor(real(ind-1, WP)/mesh_sc%N_proc(direction)) - (ind_min-1)
+                remeshY_pter(ind)%pter => buffer(pos_in_buffer(proc_gap))
+                pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
+            end do
+
+            !! pos_translat = p_pos_adim(:,i1,i2) - send_min(i1,i2) + 1
+            !! Index translation is performed in the AC_remesh_Mprime_pter subroutine on the
+            !! integer adimensionned particle position instead of here on the float position
+
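+            ! Note: the shift 1-send_min(i1,i2) passed below presumably rebases
+            ! the global mesh indices to a 1-based view of remeshY_pter, since
+            ! assumed-shape dummy arrays start at index 1 inside the callee.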
+            ! -- Remesh the particles in the buffer --
+            do ind = 1, mesh_sc%N_proc(direction)
+                call AC_remesh_Mprime_pter(p_pos_adim(ind,i1,i2), 1-send_min(i1,i2), scalar(i+i1-1,ind,k+i2-1), remeshY_pter)
+            end do
+
+            deallocate(remeshY_pter)
+        end do
+    end do
+
+    ! Scalar must be re-init before ending the remeshing
+    scalar(i:i+gs(1)-1,:,k:k+gs(2)-1) = 0
+
+end subroutine advecY_remesh_in_buffer_Mprime
+
+
+!> Update the scalar field with the scalar values stored in the buffer
+!!    @param[in]        gs          = size of the group of lines along the current direction
+!!    @param[in]        i,k         = X- and Z-coordinates of the first line along Y inside the current group of lines.
+!!    @param[in]        ind_proc    = algebraic distance between me and the process which sent me the buffer. Used to read the right cartography.
+!!    @param[in]        gap         = algebraic distance between my local indices and the local indices of the process which sent me the buffer.
+!!    @param[in]        begin_i1    = index of the first place in the cartography array where the indices
+!!                                      along the direction of the group of lines are stored.
+!!    @param[in]        cartography = cartography(proc_gap) contains the set of line indices in the block for which the
+!!                                    current process requires data from proc_gap and, for each of these lines, the range
+!!                                    of mesh points for which it requires the values.
+!!    @param[in]        buffer      = buffer containing the data to redistribute into the scalar field.
+!!    @param[in,out]    scalar      = scalar field (to update)
+!!    @param[in,out]    beg_buffer  = first index inside the current cartography where mesh indices are stored; tells where to start reading data from the buffer.
+subroutine advecY_remesh_buffer_to_scalar(gs, i, k, ind_proc, gap, begin_i1, cartography, buffer, scalar, beg_buffer)
+
+    ! Input/Output
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: i, k
+    integer, intent(in)                         :: ind_proc     ! to read the right cartography associated with the process which sent me the buffer
+    integer,intent(in)                          :: gap          ! gap between my local indices and the local indices of another process
+    integer, intent(in)                         :: begin_i1     ! index of the first place in the cartography array where the
+                                                                ! indices along the direction of the group of lines are stored
+    integer, dimension(:,:), intent(in)         :: cartography
+    real(WP),dimension(:), intent(in)           :: buffer       ! buffer containing the data to redistribute into the local scalar field
+    real(WP), dimension(:,:,:), intent(inout)   :: scalar       ! the scalar field
+    integer, intent(inout)                      :: beg_buffer   ! first index where the scalar values of the current sender process are
+                                                                ! stored in the buffer; tells where to start reading
+
+    ! Other local variables
+    integer         :: i1, i2       ! index of a line inside the group
+    integer         :: ind_for_i1   ! where to read the first coordinate (i1) of the current line inside the cartography
+    integer         :: ind_i1_range ! to know where to read the i1 ranges of the current line inside the cartography
+    integer         :: ind_1Dtable  ! index of my current position inside a one-dimensional table
+    ! Where to read data in the buffer and where to write it in the scalar field:
+    integer         :: end_buffer   ! last index where the scalar values of the current sender process are stored in the buffer
+    integer         :: beg_sca      ! first index where the scalar values have to be written in the scalar field
+    integer         :: end_sca      ! last index where the scalar values have to be written in the scalar field
+
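+    ! Same cartography traversal as in advecX_remesh_buffer_to_scalar, with the
+    ! update applied along the second (Y) dimension of the scalar field.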
+    ! Use the cartography to know which lines are concerned
+    ind_1Dtable = cartography(2,ind_proc) ! carto(2) = number of elements used to store the i1 and i2 indices
+    ! Position in cartography(:,ind_proc) of the current i1 index
+    ind_i1_range = begin_i1
+    do i2 = 1, gs(2)
+        do ind_for_i1 = ind_i1_range+1, ind_i1_range + cartography(2+i2,ind_proc), 2
+            do i1 = cartography(ind_for_i1,ind_proc), cartography(ind_for_i1+1,ind_proc)
+                beg_sca = cartography(ind_1Dtable+1,ind_proc)+gap
+                end_sca = cartography(ind_1Dtable+2,ind_proc)+gap
+                end_buffer = beg_buffer + end_sca - beg_sca
+                scalar(i+i1-1,beg_sca:end_sca,k+i2-1) = scalar(i+i1-1,beg_sca:end_sca,k+i2-1) &
+                    & + buffer(beg_buffer:end_buffer)
+                beg_buffer = end_buffer + 1
+                ind_1Dtable = ind_1Dtable + 2
+            end do
+        end do
+        ind_i1_range = ind_i1_range + cartography(2+i2,ind_proc)
+    end do
+
+end subroutine advecY_remesh_buffer_to_scalar
+
+
+! ====================================================================
+! ====================    Initialize particle     ====================
+! ====================================================================
+
+!> Creation and initialisation of a group of particle lines
+!!    @param[in]    Vy          = 3D velocity field
+!!    @param[in]    i           = X-indice of the current line
+!!    @param[in]    k           = Z-indice of the current line
+!!    @param[in]    Gsize       = size of groups (along Y direction)
+!!    @param[out]   p_V         = particle velocity
+subroutine advecY_init_group(Vy, i, k, Gsize, p_V)
+
+    use cart_topology   ! description of mesh and of mpi topology
+
+    ! input/output
+    integer, intent(in)                         :: i,k
+    integer, dimension(2), intent(in)           :: Gsize
+    real(WP), dimension(:,:,:),intent(out)      :: p_V
+    real(WP), dimension(:,:,:), intent(in)      :: Vy
+    ! Other local variables
+    integer                                     :: ind          ! index
+    integer                                     :: i_gp, k_gp   ! X and Z indices of the current line in the group
+
+    do k_gp = 1, Gsize(2)
+        do i_gp = 1, Gsize(1)
+            do ind = 1, mesh_sc%N_proc(direction)
+                p_V(ind, i_gp, k_gp)        = Vy(i+i_gp-1,ind,k+k_gp-1)
+            end do
+        end do
+    end do
+
+end subroutine advecY_init_group
+
+! ######################################################################################
+! #####                                                                            #####
+! #####                         Private procedure                                  #####
+! #####                                                                            #####
+! ######################################################################################
+
+! ==================================================================================================================================
+! ====================     Compute scalar slope for introducing limitator (against numerical oscillations)      ====================
+! ==================================================================================================================================
+
+!> Compute scalar slopes for introducing limitator
+!!    @param[in]        gp_s        = size of a group (ie number of line it gathers along the two other directions)
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        p_pos       = particles position
+!!    @param[in]        scalar      = scalar advected by particles
+!!    @param[out]       limit       = limitator function
+!! @details
+!!        This subroutine works on a group of lines. For each line of the group,
+!!    it computes the first-order scalar variations, exchanging ghost values at
+!!    the subdomain boundaries with the MPI neighbors, and deduces from them the
+!!    slope limiter used by the corrected remeshing formula.
+!!         Note that the subroutine actually computes limitator/8, as this is the
+!!    expression used inside the remeshing formula; computing it directly
+!!    minimizes the number of operations.
+subroutine advecY_limitator_group(gp_s, ind_group, i, k, p_pos, &
+                & scalar, limit)
+
+    
+    use cart_topology   ! info about mesh and mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+    use advec_correction! contains limitator computation
+    use precision_tools       ! define working precision_tools (double or simple)
+
+    integer, dimension(2),intent(in)                            :: gp_s         ! group size
+    integer, dimension(2), intent(in)                           :: ind_group    ! group index
+    integer, intent(in)                                         :: i,k          ! block coordinates
+    real(WP), dimension(:,:,:), intent(in)                      :: p_pos        ! particle position
+    real(WP), dimension(:,:,:), intent(in)                      :: scalar       ! scalar field to advect
+    real(WP), dimension(:,:,:), intent(out)                     :: limit        ! limitator function
+
+    ! Local variables
+    real(WP),dimension(gp_s(1),gp_s(2),2)                       :: Sbuffer, Rbuffer ! buffer to exchange scalar or limitator at boundaries with neighbors.
+    real(WP),dimension(gp_s(1),gp_s(2),mesh_sc%N_proc(direction)+1)     :: deltaS       ! first order scalar variation
+    integer                                                     :: ind,i1,i2    ! loop indice
+    integer                                                     :: send_request ! mpi status of nonblocking send
+    integer                                                     :: rece_request ! mpi status of nonblocking receive
+    integer, dimension(MPI_STATUS_SIZE)                         :: rece_status  ! mpi status (for mpi_wait)
+    integer, dimension(MPI_STATUS_SIZE)                         :: send_status  ! mpi status (for mpi_wait)
+    integer, dimension(2)                                       :: tag_table    ! other tags for mpi message
+    integer                                                     :: com_size     ! size of mpi message
+    integer                                                     :: ierr         ! mpi error code
+
+    ! ===== Initialisation =====
+    com_size = 2*gp_s(1)*gp_s(2)
+
+    ! ===== Exchange ghost =====
+    ! Receive the ghost values, i.e. the boundary values from the neighbor.
+    tag_table = compute_tag(ind_group, tag_part_slope, direction)
+    call mpi_Irecv(Rbuffer(1,1,1), com_size, MPI_REAL_WP, &
+            & neighbors(direction,1), tag_table(1), D_comm(direction), rece_request, ierr)
+    ! Send the first two scalar values of each line as ghosts
+    do i1 = 1, gp_s(1)
+        do i2 = 1, gp_s(2)
+            Sbuffer(i1,i2,1) = scalar(i+i1-1,1,k+i2-1)
+            Sbuffer(i1,i2,2) = scalar(i+i1-1,2,k+i2-1)
+        end do
+    end do
+    call mpi_ISsend(Sbuffer(1,1,1), com_size, MPI_REAL_WP, &
+            & neighbors(direction,-1), tag_table(1), D_comm(direction), send_request, ierr)
+
+    ! ===== Compute scalar variation =====
+    ! -- For the "middle" block --
+    do ind = 1, mesh_sc%N_proc(direction)-1
+        deltaS(:,:,ind) = scalar(i:i+gp_s(1)-1,ind+1,k:k+gp_s(2)-1) &
+                        & - scalar(i:i+gp_s(1)-1,ind,k:k+gp_s(2)-1)
+    end do
+    ! -- For the last elements of each line --
+    ! Check reception
+    call mpi_wait(rece_request, rece_status, ierr)
+    ! Compute delta (after the loop above, ind = mesh_sc%N_proc(direction))
+    deltaS(:,:,mesh_sc%N_proc(direction)) = Rbuffer(:,:,1) - scalar(i:i+gp_s(1)-1,ind,k:k+gp_s(2)-1)   ! scalar(N+1) - scalar(N)
+    deltaS(:,:,mesh_sc%N_proc(direction)+1) = Rbuffer(:,:,2) - Rbuffer(:,:,1)   ! scalar(N+2) - scalar(N+1)
+
+
+    ! ===== Compute limitator =====
+    call AC_limitator_from_slopes(direction, gp_s, p_pos, deltaS,   &
+            & limit, tag_table(2), com_size)
+
+    ! ===== Close mpi_ISsend when done =====
+    call mpi_wait(send_request, send_status, ierr)
+
+end subroutine advecY_limitator_group
+
+
+end module advecY
+!> @}
diff --git a/HySoP/src/scalesReduced/particles/advecZ.f90 b/HySoP/src/scalesReduced/particles/advecZ.f90
new file mode 100644
index 0000000000000000000000000000000000000000..4bb0e5a76e4684d16b90cb8d9c0bdac8ca402a0d
--- /dev/null
+++ b/HySoP/src/scalesReduced/particles/advecZ.f90
@@ -0,0 +1,476 @@
+!USEFORTEST advec
+!> @addtogroup part
+!! @{
+
+!------------------------------------------------------------------------------
+!
+! MODULE: advecZ
+!
+!
+! DESCRIPTION:
+!> The module advecZ is devoted to the advection of a scalar field along the Z axis.
+!! It uses a particle method and provides a parallel implementation.
+!
+!> @details
+!! This module is a part of the advection solver based on a particle method.
+!! The solver uses dimensional splitting, and this module contains all the
+!! methods used to solve advection along the Z-axis. This is a parallel
+!! implementation using MPI and the Cartesian topology it provides.
+!!
+!! This module can use the method and variables defined in the module
+!! "advec_common" which gather information and tools shared for advection along
+!! x, y and z-axis.
+!!
+!! The module "test_advec" can be used in order to validate the procedures
+!! embedded in this module.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advecZ
+
+    use precision_tools
+    use advec_abstract_proc
+
+    implicit none
+
+    ! ===== Public procedures =====
+    ! -- Init remeshing context --
+    public  :: advecZ_init_group
+    ! -- Remeshing algorithm --
+    public  :: advecZ_remesh_in_buffer_lambda
+    public  :: advecZ_remesh_in_buffer_limit_lambda
+    public  :: advecZ_remesh_in_buffer_Mprime
+    public  :: advecZ_remesh_buffer_to_scalar
+
+    ! ===== Private procedures =====
+    ! -- Compute limitator --
+    public  :: advecZ_limitator_group
+
+    ! ===== Private variable ====
+    !> Current direction = 3 ie along Z
+    integer, parameter, private     :: direction = 3
+
+contains
+
+! #####################################################################################
+! #####                                                                           #####
+! #####                         Public procedure                                  #####
+! #####                                                                           #####
+! #####################################################################################
+
+! ====================================================================
+! ====================    Remeshing tools         ====================
+! ====================================================================
+
+!> Remesh particles inside a buffer. Uses corrected lambda remeshing polynomials.
+!! @author Jean-Baptiste Lagaert
+!!    @param[in]        gs          = size of the group of lines along the current direction
+!!    @param[in]        i,j         = X- and Y-coordinates of the first line of the current group of lines
+!!    @param[in]        ind_min     = the indices in "pos_in_buffer" do not start from 1 but from ind_min;
+!!                                    a gap of (-ind_min) is added to each index into "pos_in_buffer"
+!!                                    to avoid out-of-range accesses.
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        bl_type     = table of block types (center or left)
+!!    @param[in]        bl_tag      = inform about tagged particles (bl_tag(ind_bl)=1 if the end of the ind_bl-th block
+!!                                    and the beginning of the following one are tagged)
+!!    @param[in]        send_min    = minimal index of the mesh points involved in remeshing my local particles
+!!    @param[in]        send_max    = maximal index of the mesh points involved in remeshing my local particles
+!!    @param[in,out]    scalar      = the initial scalar field transported by the particles
+!!    @param[out]       buffer      = buffer where the particles are remeshed
+!!    @param[in,out]    pos_in_buffer = information about where to remesh the particles inside the buffer
+subroutine advecZ_remesh_in_buffer_lambda(gs, i, j, ind_min, p_pos_adim, bl_type, bl_tag, send_min, send_max, &
+        & scalar, buffer, pos_in_buffer)
+
+    use cart_topology           ! Description of mesh and of mpi topology
+    use advec_variables         ! contains info about solver parameters and others.
+    use advec_abstract_proc     ! profile of generic procedure
+    use advec_remeshing_lambda  ! needed to remesh !!
+
+    ! Input/Output
+    integer, dimension(2), intent(in)                   :: gs
+    integer, intent(in)                                 :: i, j
+    integer, intent(in)                                 :: ind_min
+    real(WP), dimension(:,:,:), intent(in)              :: p_pos_adim   ! adimensionned particles position
+    logical, dimension(:,:,:), intent(in)               :: bl_type      ! is the particle block a center block or a left one ?
+    logical, dimension(:,:,:), intent(in)               :: bl_tag       ! indice of tagged particles
+    integer, dimension(:,:), intent(in)                 :: send_min     ! minimal index of the mesh points involved in remeshing my particles
+    integer, dimension(:,:), intent(in)                 :: send_max     ! maximal index of the mesh points involved in remeshing my particles
+    real(WP), dimension(:,:,:), intent(inout)           :: scalar       ! the initial scalar field transported by particles
+    real(WP),dimension(:), intent(out), target          :: buffer       ! buffer where particles are remeshed
+    integer, dimension(:), intent(inout)                :: pos_in_buffer! describes how the one-dimensional array "buffer" is split
+                                                                        ! into parts corresponding to the different processes
+
+    ! Other local variables
+    integer                                 :: proc_gap     ! distance between my (MPI) coordinates and those of the remote process
+    type(real_pter),dimension(:),allocatable:: remeshZ_pter ! pointers into the send buffer, in which scalar values are
+                                                            ! sorted by line index and by receiver
+    integer                                 :: i1, i2       ! indice of a line into the group
+    integer                                 :: ind          ! indice of the current particle inside the current line.
+
+    ! ===== Remeshing into the buffer by using pointer array =====
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            send_j_min = send_min(i1,i2)
+            send_j_max = send_max(i1,i2)
+
+            ! -- Allocate remeshZ_pter --
+            allocate(remeshZ_pter(send_j_min:send_j_max))
+            do ind = send_j_min, send_j_max
+                proc_gap = floor(real(ind-1, WP)/mesh_sc%N_proc(direction)) - (ind_min-1)
+                remeshZ_pter(ind)%pter => buffer(pos_in_buffer(proc_gap))
+                pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
+            end do
+
+            ! -- Remesh the particles in the buffer --
+            call AC_remesh_lambda_pter(direction, p_pos_adim(:,i1,i2), scalar(i+i1-1,j+i2-1,:), &
+                & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, remeshZ_pter)
+
+            deallocate(remeshZ_pter)
+        end do
+    end do
+
+    ! Scalar must be re-init before ending the remeshing
+    scalar(i:i+gs(1)-1,j:j+gs(2)-1,:) = 0
+
+end subroutine advecZ_remesh_in_buffer_lambda
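+
+! A minimal worked example (illustration only, values hypothetical) of the
+! index-to-process mapping used above, assuming mesh_sc%N_proc(direction) = 8
+! and ind_min = 0:
+!   proc_gap = floor(real(ind-1, WP)/8._WP) - (0-1)
+!   ! ind = 1..8  -> proc_gap = 1 : first receiver's slice of "buffer"
+!   ! ind = 9..16 -> proc_gap = 2 : next receiver's slice
+! Each remeshed value is written at buffer(pos_in_buffer(proc_gap)) and the
+! cursor pos_in_buffer(proc_gap) is then advanced by one.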
+
+
+!> Remesh particles inside a buffer. Uses corrected lambda remeshing polynomials.
+!! @author Jean-Baptiste Lagaert
+!!    @param[in]        gs          = size of group of line along the current direction
+!!    @param[in]        i,j         = X- and Y-coordinates of the first line along X inside the current group of lines.
+!!    @param[in]        ind_min     = the indices of the original array "pos_in_buffer" do not start from 1.
+!!                                    They actually start from ind_min; to avoid out-of-range accesses,
+!!                                    a gap of (-ind_min) is added to each index of "pos_in_buffer".
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        bl_type     = table of blocks type (center or left)
+!!    @param[in]        bl_tag      = informs about tagged particles (bl_tag(ind_bl)=1 if the end of the bl_ind-th block
+!!                                    and the beginning of the following one are tagged)
+!!    @param[in]        limit       = limitator function
+!!    @param[in]        send_min    = minimal mesh index involved in remeshing particles (of the particles in my local subdomains)
+!!    @param[in]        send_max    = maximal mesh index involved in remeshing particles (of the particles in my local subdomains)
+!!    @param[in]        scalar      = the initial scalar field transported by particles
+!!    @param[out]       buffer      = buffer where particles are remeshed
+!!    @param[in,out]    pos_in_buffer   = information about where to remesh the particles inside the buffer
+subroutine advecZ_remesh_in_buffer_limit_lambda(gs, i, j, ind_min, p_pos_adim, bl_type, bl_tag, limit, &
+        & send_min, send_max, scalar, buffer, pos_in_buffer)
+
+    use cart_topology           ! Description of mesh and of mpi topology
+    use advec_variables         ! contains info about solver parameters and others.
+    use advec_abstract_proc     ! profile of generic procedure
+    use advec_remeshing_lambda  ! needed to remesh !!
+
+    ! Input/Output
+    integer, dimension(2), intent(in)                   :: gs
+    integer, intent(in)                                 :: i, j
+    integer, intent(in)                                 :: ind_min
+    real(WP), dimension(:,:,:), intent(in)              :: p_pos_adim   ! dimensionless particle positions
+    logical, dimension(:,:,:), intent(in)               :: bl_type      ! is the particle block a center block or a left one?
+    logical, dimension(:,:,:), intent(in)               :: bl_tag       ! flags for tagged particles
+    real(WP), dimension(:,:,:), intent(in)              :: limit        ! limitator function (divided by 8)
+    integer, dimension(:,:), intent(in)                 :: send_min     ! minimal mesh index involved in remeshing my particles
+    integer, dimension(:,:), intent(in)                 :: send_max     ! maximal mesh index involved in remeshing my particles
+    real(WP), dimension(:,:,:), intent(inout)           :: scalar       ! the initial scalar field transported by particles
+    real(WP),dimension(:), intent(out), target          :: buffer       ! buffer where particles are remeshed
+    integer, dimension(:), intent(inout)                :: pos_in_buffer! describes how the one-dimensional array "buffer" is split
+                                                                        ! into parts corresponding to the different processes
+
+    ! Other local variables
+    integer                                 :: proc_gap     ! distance between my (mpi) coordinate and the coordinate of the
+                                                            ! process which will receive the current data
+    type(real_pter),dimension(:),allocatable:: remeshZ_pter  ! pointers into the send buffer, in which scalar values are
+                                                            ! sorted by line index and grouped by receiver
+    integer                                 :: i1, i2       ! index of a line inside the group
+    integer                                 :: ind          ! index of the current particle inside the current line
+
+    ! ===== Remeshing into the buffer by using pointer array =====
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            send_j_min = send_min(i1,i2)
+            send_j_max = send_max(i1,i2)
+
+            ! -- Allocate remeshZ_pter --
+            allocate(remeshZ_pter(send_j_min:send_j_max))
+            do ind = send_j_min, send_j_max
+                proc_gap = floor(real(ind-1, WP)/mesh_sc%N_proc(direction)) - (ind_min-1)
+                remeshZ_pter(ind)%pter => buffer(pos_in_buffer(proc_gap))
+                pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
+            end do
+
+            ! -- Remesh the particles in the buffer --
+            call AC_remesh_lambda2limited_pter(direction, p_pos_adim(:,i1,i2), scalar(i+i1-1,j+i2-1,:), &
+                & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, limit(:,i1,i2), remeshZ_pter)
+
+            deallocate(remeshZ_pter)
+        end do
+    end do
+
+    ! Scalar must be re-init before ending the remeshing
+    scalar(i:i+gs(1)-1,j:j+gs(2)-1,:) = 0
+
+end subroutine advecZ_remesh_in_buffer_limit_lambda
+
+
+!> Remesh particles inside a buffer - for M'6 or M'8 - direction = along Z
+!! @author Jean-Baptiste Lagaert
+!!    @param[in]        gs          = size of group of line along the current direction
+!!    @param[in]        i,j         = X- and Y-coordinates of the first line along X inside the current group of lines.
+!!    @param[in]        ind_min     = the indices of the original array "pos_in_buffer" do not start from 1.
+!!                                    They actually start from ind_min; to avoid out-of-range accesses,
+!!                                    a gap of (-ind_min) is added to each index of "pos_in_buffer".
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        send_min    = minimal mesh index involved in remeshing particles (of the particles in my local subdomains)
+!!    @param[in]        send_max    = maximal mesh index involved in remeshing particles (of the particles in my local subdomains)
+!!    @param[in]        scalar      = the initial scalar field transported by particles
+!!    @param[out]       buffer      = buffer where particles are remeshed
+!!    @param[in,out]    pos_in_buffer   = information about where to remesh the particles inside the buffer
+subroutine advecZ_remesh_in_buffer_Mprime(gs, i, j, ind_min, p_pos_adim, send_min, send_max, &
+        & scalar, buffer, pos_in_buffer)
+
+    use cart_topology           ! Description of mesh and of mpi topology
+    use advec_variables         ! contains info about solver parameters and others.
+    use advec_abstract_proc     ! profile of generic procedure
+    use advec_remeshing_Mprime  ! remeshing formula and wrapper for a line of particles
+
+    ! Input/Output
+    integer, dimension(2), intent(in)                   :: gs
+    integer, intent(in)                                 :: i, j
+    integer, intent(in)                                 :: ind_min
+    real(WP), dimension(:,:,:), intent(in)              :: p_pos_adim   ! dimensionless particle positions
+    integer, dimension(:,:), intent(in)                 :: send_min     ! minimal mesh index involved in remeshing my particles
+    integer, dimension(:,:), intent(in)                 :: send_max     ! maximal mesh index involved in remeshing my particles
+    real(WP), dimension(:,:,:), intent(inout)           :: scalar       ! the initial scalar field transported by particles
+    real(WP),dimension(:), intent(out), target          :: buffer       ! buffer where particles are remeshed
+    integer, dimension(:), intent(inout)                :: pos_in_buffer! describes how the one-dimensional array "buffer" is split
+                                                                        ! into parts corresponding to the different processes
+
+    ! Other local variables
+    integer                                 :: proc_gap     ! distance between my (mpi) coordinate and the coordinate of the
+                                                            ! process which will receive the current data
+    type(real_pter),dimension(:),allocatable:: remeshZ_pter  ! pointers into the send buffer, in which scalar values are
+                                                            ! sorted by line index and grouped by receiver
+    integer                                 :: i1, i2       ! index of a line inside the group
+    integer                                 :: ind          ! index of the current particle inside the current line
+    !! real(WP), dimension(mesh_sc%N_proc(direction))  :: pos_translat ! translation of p_pos_adim as array indice
+    !!                                                        ! are now starting from 1 and not ind_min
+
+
+    ! ===== Remeshing into the buffer by using pointer array =====
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+
+            ! -- Allocate remeshZ_pter --
+            allocate(remeshZ_pter(send_min(i1,i2):send_max(i1,i2)))
+            do ind = send_min(i1,i2), send_max(i1,i2)
+                proc_gap = floor(real(ind-1, WP)/mesh_sc%N_proc(direction)) - (ind_min-1)
+                remeshZ_pter(ind)%pter => buffer(pos_in_buffer(proc_gap))
+                pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
+            end do
+
+            !! pos_translat = p_pos_adim(:,i1,i2) - send_min(i1,i2) + 1
+            !! Index translation is performed in the AC_remesh_Mprime_pter subroutine on the
+            !! integer adimensionned particle position instead of here on the float position
+
+            ! -- Remesh the particles in the buffer --
+            do ind = 1, mesh_sc%N_proc(direction)
+                call AC_remesh_Mprime_pter(p_pos_adim(ind,i1,i2), 1-send_min(i1,i2), scalar(i+i1-1,j+i2-1,ind), remeshZ_pter)
+            end do
+
+            deallocate(remeshZ_pter)
+        end do
+    end do
+
+    ! Scalar must be re-init before ending the remeshing
+    scalar(i:i+gs(1)-1,j:j+gs(2)-1,:) = 0
+
+end subroutine advecZ_remesh_in_buffer_Mprime
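+
+! For reference, a hedged statement of the M' remeshing step performed above
+! (the exact kernel and its support depend on the M'6/M'8 formula chosen in
+! advec_remeshing_Mprime, whose body is not shown here): each particle of
+! weight s_p at dimensionless position x_p adds
+!   s_p * Mprime(x_p - k)
+! to every mesh point k inside the kernel support, writing through the
+! remeshZ_pter pointers that redirect k to the right slice of the send buffer.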
+
+
+!> Update the scalar field with the scalar values stored in the buffer
+!!    @param[in]        gs          = size of group of line along the current direction
+!!    @param[in]        i,j         = X- and Y-coordinates of the first line along X inside the current group of lines.
+!!    @param[in]        ind_proc    = algebraic distance between me and the process which sent me the buffer. Used to read the right cartography.
+!!    @param[in]        gap         = algebraic distance between my local indices and the local indices of the process which sent me the buffer.
+!!    @param[in]        begin_i1    = index of the first place in the cartography array where the indices
+!!                                    along the direction of the group of lines are stored.
+!!    @param[in]        cartography = cartography(proc_gap) contains the set of line indices in the block for which the
+!!                                    current process requires data from proc_gap, and for each of these lines the range
+!!                                    of mesh points for which it requires values.
+!!    @param[in]        buffer      = buffer containing the data to redistribute into the scalar field.
+!!    @param[out]       scalar      = scalar field (to update)
+!!    @param[in,out]    beg_buffer  = first index of the current cartography where mesh indices are stored. Tells where to start reading the buffer.
+subroutine advecZ_remesh_buffer_to_scalar(gs, i, j, ind_proc, gap, begin_i1, cartography, buffer, scalar, beg_buffer)
+
+    ! Input/Output
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: i, j
+    integer, intent(in)                         :: ind_proc     ! to read the right cartography, associated with the process which sent me the buffer
+    integer,intent(in)                          :: gap          ! gap between my local indices and the local indices of another process
+    integer, intent(in)                         :: begin_i1     ! index of the first place in the cartography array where the
+                                                                ! indices along the direction of the group of lines are stored
+    integer, dimension(:,:), intent(in)         :: cartography
+    real(WP),dimension(:), intent(in)           :: buffer       ! buffer containing the data to redistribute into the local scalar field.
+    real(WP), dimension(:,:,:), intent(inout)   :: scalar       ! the scalar field.
+    integer, intent(inout)                      :: beg_buffer   ! first index where the scalar values of the current sender process
+                                                                ! are stored in the buffer; tells where to start reading
+
+    ! Other local variables
+    integer         :: i1, i2       ! index of a line inside the group
+    integer         :: ind_for_i1   ! where to read the first coordinate (i1) of the current line inside the cartography
+    integer         :: ind_i1_range ! to know where to read the first coordinate (i1) of the current line inside the cartography
+    integer         :: ind_1Dtable  ! index of my current position inside a one-dimensional table
+    ! To know where to read data in the buffer and where to write inside the scalar field:
+    integer         :: end_buffer   ! last index where the scalar values of the current sender process are stored in the buffer
+    integer         :: beg_sca      ! first index where the scalar values have to be written inside the scalar field
+    integer         :: end_sca      ! last index where the scalar values have to be written inside the scalar field
+
+    ! Use the cartography to know which lines are concerned
+    ind_1Dtable = cartography(2,ind_proc) ! carto(2) = number of elements used to store the i1 and i2 indices
+    ! Position in cartography(:,ind_proc) of the current i1 index
+    ind_i1_range = begin_i1
+    do i2 = 1, gs(2)
+        do ind_for_i1 = ind_i1_range+1, ind_i1_range + cartography(2+i2,ind_proc), 2
+            do i1 = cartography(ind_for_i1,ind_proc), cartography(ind_for_i1+1,ind_proc)
+                beg_sca = cartography(ind_1Dtable+1,ind_proc)+gap
+                end_sca = cartography(ind_1Dtable+2,ind_proc)+gap
+                end_buffer = beg_buffer + end_sca - beg_sca
+                scalar(i+i1-1,j+i2-1,beg_sca:end_sca) = scalar(i+i1-1,j+i2-1,beg_sca:end_sca) &
+                    & + buffer(beg_buffer:end_buffer)
+                beg_buffer = end_buffer + 1
+                ind_1Dtable = ind_1Dtable + 2
+            end do
+        end do
+        ind_i1_range = ind_i1_range + cartography(2+i2,ind_proc)
+    end do
+
+end subroutine advecZ_remesh_buffer_to_scalar
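+
+! Hedged reading guide for the cartography layout consumed above, as used in
+! this file (entries are 1-based within cartography(:,ind_proc)):
+!   carto(2)                 = number of entries used to store the line indices,
+!                              ie the offset after which the mesh-range pairs start
+!   carto(2+i2)              = number of entries storing i1 ranges for the i2-th row
+!   carto(ind), carto(ind+1) = one (i1_first, i1_last) range of lines
+!   carto(ind_1Dtable+1)+gap, carto(ind_1Dtable+2)+gap
+!                            = the slice of mesh points to update, shifted by "gap"
+!                              into my local indices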
+
+
+! ====================================================================
+! ====================    Initialize particle     ====================
+! ====================================================================
+
+!> Creation and initialisation of a group of particle lines
+!!    @param[in]    Vz          = 3D velocity field
+!!    @param[in]    i           = X-index of the current line
+!!    @param[in]    j           = Y-index of the current line
+!!    @param[in]    Gsize       = size of groups (along Z direction)
+!!    @param[out]   p_V         = particle velocity
+subroutine advecZ_init_group(Vz, i, j, Gsize, p_V)
+
+    use cart_topology   ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    integer, intent(in)                         :: i,j
+    integer, dimension(2), intent(in)           :: Gsize
+    real(WP), dimension(:,:,:),intent(out)      :: p_V
+    real(WP), dimension(:,:,:), intent(in)      :: Vz
+    ! Other local variables
+    integer                                     :: ind          ! index
+    integer                                     :: i_gp, j_gp   ! X and Y indices of the current line in the group
+
+    do j_gp = 1, Gsize(2)
+        do i_gp = 1, Gsize(1)
+            do ind = 1, mesh_sc%N_proc(direction)
+                p_V(ind, i_gp, j_gp)        = Vz(i+(i_gp-1),j+(j_gp-1), ind)
+            end do
+        end do
+    end do
+
+end subroutine advecZ_init_group
+
+
+! ######################################################################################
+! #####                                                                            #####
+! #####                         Private procedure                                  #####
+! #####                                                                            #####
+! ######################################################################################
+
+! ==================================================================================================================================
+! ====================     Compute scalar slope for introducing limitator (against numerical oscillations)      ====================
+! ==================================================================================================================================
+
+!> Compute scalar slopes for introducing limitator
+!!    @param[in]        gp_s        = size of a group (ie number of lines it gathers along the two other directions)
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        p_pos       = particles position
+!!    @param[in]        scalar      = scalar advected by particles
+!!    @param[out]       limit       = limitator function
+!! @details
+!!        This subroutine works on a group of lines. For each line of this group, it
+!!    determines the type of each block of the line and where corrected remeshing
+!!    formulas are required. At those points, it tags the block transition (ie the end of
+!!    the current block and the beginning of the following one) in order to indicate
+!!    that corrected weights have to be used during the remeshing.
+!!         Note that the subroutine actually computes limitator/8, as this is the
+!!    expression used inside the remeshing formula; computing it directly
+!!    minimizes the number of operations.
+subroutine advecZ_limitator_group(gp_s, ind_group, i, j, p_pos, &
+                & scalar, limit)
+
+    
+    use cart_topology   ! info about mesh and mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+    use advec_correction! contains limitator computation
+    use precision_tools       ! define working precision_tools (double or simple)
+
+    integer, dimension(2),intent(in)                            :: gp_s         ! group size
+    integer, dimension(2), intent(in)                           :: ind_group    ! group index
+    integer , intent(in)                                        :: i,j          ! block coordinates
+    real(WP), dimension(:,:,:), intent(in)                      :: p_pos        ! particle position
+    real(WP), dimension(:,:,:), intent(in)                      :: scalar       ! scalar field to advect
+    real(WP), dimension(:,:,:), intent(out)                     :: limit        ! limitator function
+
+    ! Local variables
+    real(WP),dimension(gp_s(1),gp_s(2),2)                       :: Sbuffer, Rbuffer ! buffer to exchange scalar or limitator at boundaries with neighbors.
+    real(WP),dimension(gp_s(1),gp_s(2),mesh_sc%N_proc(direction)+1)     :: deltaS       ! first order scalar variation
+    integer                                                     :: ind          ! loop index over particles
+    integer                                                     :: send_request ! mpi request (handle) of nonblocking send
+    integer                                                     :: rece_request ! mpi request (handle) of nonblocking receive
+    integer, dimension(MPI_STATUS_SIZE)                         :: rece_status  ! mpi status (for mpi_wait)
+    integer, dimension(MPI_STATUS_SIZE)                         :: send_status  ! mpi status (for mpi_wait)
+    integer, dimension(2)                                       :: tag_table    ! other tags for mpi message
+    integer                                                     :: com_size     ! size of mpi message
+    integer                                                     :: ierr         ! mpi error code
+
+    ! ===== Initialisation =====
+    com_size = 2*gp_s(1)*gp_s(2)
+
+    ! ===== Exchange ghost =====
+    ! Receive ghost values, ie values from the neighbours' boundaries.
+    tag_table = compute_tag(ind_group, tag_part_slope, direction)
+    call mpi_Irecv(Rbuffer(1,1,1), com_size, MPI_REAL_WP, &
+            & neighbors(direction,1), tag_table(1), D_comm(direction), rece_request, ierr)
+    ! Send ghost values: the first two scalar values of each line
+    Sbuffer = scalar(i:i+gp_s(1)-1,j:j+gp_s(2)-1,1:2)
+    call mpi_ISsend(Sbuffer(1,1,1), com_size, MPI_REAL_WP, &
+            & neighbors(direction,-1), tag_table(1), D_comm(direction), send_request, ierr)
+
+    ! ===== Compute scalar variation =====
+    ! -- For the "middle" block --
+    do ind = 1, mesh_sc%N_proc(direction)-1
+        deltaS(:,:,ind) = scalar(i:i+gp_s(1)-1,j:j+gp_s(2)-1,ind+1) &
+                        & - scalar(i:i+gp_s(1)-1,j:j+gp_s(2)-1,ind)
+    end do
+    ! -- For the last elements of each line --
+    ! Check reception
+    call mpi_wait(rece_request, rece_status, ierr)
+    ! Compute delta
+    deltaS(:,:,mesh_sc%N_proc(direction)) = Rbuffer(:,:,1) &
+                                    & - scalar(i:i+gp_s(1)-1,j:j+gp_s(2)-1,mesh_sc%N_proc(direction)) ! scalar(N+1) - scalar(N)
+    deltaS(:,:,mesh_sc%N_proc(direction)+1) = Rbuffer(:,:,2) - Rbuffer(:,:,1)   ! scalar(N+2) - scalar(N+1)
+
+
+    ! ===== Compute slope and limitator =====
+    call AC_limitator_from_slopes(direction, gp_s, p_pos, deltaS,   &
+            & limit, tag_table(2), com_size)
+
+    ! ===== Close mpi_ISsend when done =====
+    call mpi_wait(send_request, send_status, ierr)
+
+end subroutine advecZ_limitator_group
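+
+! A worked restatement of the slopes computed above, with s = scalar restricted
+! to one line and N = mesh_sc%N_proc(direction):
+!   deltaS(ind) = s(ind+1) - s(ind),  ind = 1 .. N-1   (local mesh points)
+!   deltaS(N)   = s(N+1) - s(N)                        (uses first ghost value)
+!   deltaS(N+1) = s(N+2) - s(N+1)                      (uses both ghost values)
+! where s(N+1) and s(N+2) are the two ghost values received in Rbuffer from the
+! next neighbour along the current direction.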
+
+
+end module advecZ
+!> @}
diff --git a/HySoP/src/scalesReduced/particles/advec_Vector.f90 b/HySoP/src/scalesReduced/particles/advec_Vector.f90
new file mode 100644
index 0000000000000000000000000000000000000000..6ddef2ca6290543c7c79e91df5a3ab5bad1b4922
--- /dev/null
+++ b/HySoP/src/scalesReduced/particles/advec_Vector.f90
@@ -0,0 +1,263 @@
+!USEFORTEST advec
+!> @addtogroup part
+!! @{
+!------------------------------------------------------------------------------
+!
+! MODULE: advec
+!
+!
+! DESCRIPTION:
+!> The module advec provides all public interfaces to solve an advection equation
+!! with a particle method.
+!
+!> @details
+!!     This module contains the generic procedures to initialize and parametrise the
+!! advection solver based on the particle method. It also contains the subroutine
+!! "advec_step" which solves the equation for a given time step. It is the only
+!! module which is supposed to be included by a code using this library of
+!! particle methods.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+module advec_Vect
+
+    use advec, only : advec_init
+
+    use precision_tools
+    use advec_abstract_proc
+    implicit none
+
+    ! ===== Private variables =====
+    !> numerical method used to advect the scalar
+    character(len=str_short), private   :: type_part_solv
+    !> dimensional splitting (eg classical, Strang or particle)
+    character(len=str_short), private   :: dim_splitting
+
+
+    ! ===== Public procedures =====
+    ! Scheme used to advect the scalar (order 2 or 4?)
+!    public                              :: type_part_solver
+
+    ! Advection methods
+!    public                              :: advec_init           ! initialize the scalar solver
+    public                              :: advec_step_Vect      ! advect the scalar field during a time step.
+!    procedure(advec_step_Torder2), pointer, public    :: advec_step => null()
+!    public                              :: advec_step_Torder1   ! advec the scalar field during a time step.
+!    public                              :: advec_step_Torder2   ! advec the scalar field during a time step.
+!
+    ! Remeshing formula
+    procedure(AC_remesh), pointer, private :: advec_remesh_bis => null()
+
+contains
+
+! ===== Public methods =====
+
+!> Return the name of the particle method used for the advection
+!!    @return type_part_solver      = numerical method used for advection
+function type_part_solver()
+    character(len=str_short)    :: type_part_solver
+
+    type_part_solver = type_part_solv
+end function
+
+!> Solve advection equation - order 2 - with basic velocity interpolation
+!!    @param[in]        dt          = time step
+!!    @param[in]        Vx          = velocity along x (may be discretised on a bigger mesh than the scalar)
+!!    @param[in]        Vy          = velocity along y
+!!    @param[in]        Vz          = velocity along z
+!!    @param[in,out]    VectX       = X component of vector to advect
+!!    @param[in,out]    VectY       = Y component of vector to advect
+!!    @param[in,out]    VectZ       = Z component of vector to advect
+subroutine advec_step_Inter_basic_Vect(dt, Vx, Vy, Vz, vectX, vectY, vectZ)
+
+    use Interpolation_velo
+
+    ! Input/Output
+    real(WP), intent(in)                        :: dt
+    real(WP), dimension(:,:,:), intent(in)      :: Vx, Vy, Vz
+    real(WP), dimension(:,:,:), intent(inout)   :: vectX, vectY, vectZ
+    ! Local
+    real(WP), dimension(:,:,:), allocatable   :: Vx_f, Vy_f, Vz_f
+    integer                                   :: ierr                ! Error code.
+
+    allocate(Vx_f(mesh_sc%N_proc(1),mesh_sc%N_proc(2),mesh_sc%N_proc(3)),stat=ierr)
+    if (ierr/=0) write(6,'(a,i0,a)') '[ERROR] on cart_rank ', cart_rank, ' - not enough memory for Vx_f'
+    allocate(Vy_f(mesh_sc%N_proc(1),mesh_sc%N_proc(2),mesh_sc%N_proc(3)),stat=ierr)
+    if (ierr/=0) write(6,'(a,i0,a)') '[ERROR] on cart_rank ', cart_rank, ' - not enough memory for Vy_f'
+    allocate(Vz_f(mesh_sc%N_proc(1),mesh_sc%N_proc(2),mesh_sc%N_proc(3)),stat=ierr)
+    if (ierr/=0) write(6,'(a,i0,a)') '[ERROR] on cart_rank ', cart_rank, ' - not enough memory for Vz_f'
+
+    call Interpol_3D(Vx, mesh_V%dx, Vx_f, mesh_sc%dx)
+    call Interpol_3D(Vy, mesh_V%dx, Vy_f, mesh_sc%dx)
+    call Interpol_3D(Vz, mesh_V%dx, Vz_f, mesh_sc%dx)
+    if (cart_rank==0) write(6,'(a)') '        [INFO PARTICLES] Interpolation done'
+
+    call advec_step_Vect(dt, Vx_f, Vy_f, Vz_f, vectX, vectY, vectZ)
+
+    deallocate(Vx_f)
+    deallocate(Vy_f)
+    deallocate(Vz_f)
+
+end subroutine advec_step_Inter_basic_Vect
+
+!> Solve advection equation - order 2 in time (order 2 dimensional splitting)
+!!    @param[in]        dt          = time step
+!!    @param[in]        Vx          = velocity along x (may be discretised on a bigger mesh than the scalar)
+!!    @param[in]        Vy          = velocity along y
+!!    @param[in]        Vz          = velocity along z
+!!    @param[in,out]    VectX       = X component of vector to advect
+!!    @param[in,out]    VectY       = Y component of vector to advect
+!!    @param[in,out]    VectZ       = Z component of vector to advect
+subroutine advec_step_Vect(dt, Vx, Vy, Vz, vectX, vectY, vectZ)
+
+    use advec, only : advec_setup_alongX, advec_setup_alongY, &
+        & advec_setup_alongZ, gsX, gsY, gsZ
+    use advecX          ! Methods to advect along X
+    use advecY          ! Methods to advect along Y
+    use advecZ          ! Methods to advect along Z
+
+    ! Input/Output
+    real(WP), intent(in)                        :: dt
+    real(WP), dimension(:,:,:), intent(in)      :: Vx, Vy, Vz
+    real(WP), dimension(:,:,:), intent(inout)   :: vectX, vectY, vectZ
+
+    call advec_setup_alongX()
+    call advec_vector_X_basic_no_com(dt/2.0, gsX, Vx, vectX, vectY, vectZ)
+    call advec_setup_alongY()
+    call advec_vector_1D_basic(dt/2.0, gsY, Vy, vectX, vectY, vectZ)
+    call advec_setup_alongZ()
+    call advec_vector_1D_basic(dt/2.0, gsZ, Vz, vectX, vectY, vectZ)
+    call advec_vector_1D_basic(dt/2.0, gsZ, Vz, vectX, vectY, vectZ)
+    call advec_setup_alongY()
+    call advec_vector_1D_basic(dt/2.0, gsY, Vy, vectX, vectY, vectZ)
+    call advec_setup_alongX()
+    call advec_vector_X_basic_no_com(dt/2.0, gsX, Vx, vectX, vectY, vectZ)
+
+end subroutine advec_step_Vect
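+
+! The call sequence above is a symmetric (Strang-like) splitting:
+! X(dt/2) Y(dt/2) Z(dt/2) Z(dt/2) Y(dt/2) X(dt/2), ie Z is advanced by a full
+! dt in the middle. A hedged sketch of why this is second order in time: for
+! two split operators A and B,
+!   exp(dt*(A+B)) = exp(dt/2*A) * exp(dt*B) * exp(dt/2*A) + O(dt^3),
+! and the palindromic ordering above applies the same idea to the three
+! directional advection operators.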
+
+
+!> Scalar advection along one direction (this procedure calls the right solver, depending on the simulation setup).
+!! Variant for the advection of a 3D vector.
+!!    @param[in]        dt          = time step
+!!    @param[in]        gs          = size of the work item along transverse direction
+!!    @param[in]        V_comp      = velocity component
+!!    @param[in,out]    VectX       = X component of vector to advect
+!!    @param[in,out]    VectY       = Y component of vector to advect
+!!    @param[in,out]    VectZ       = Z component of vector to advect
+subroutine advec_vector_1D_basic(dt, gs, V_comp, vectX, vectY, vectZ)
+
+    use advec, only : advec_init_velo, advec_remesh, line_dir, gp_dir1, gp_dir2
+    use advecX, only : advecX_init_group    ! procedure devoted to advection along X
+    use advecY, only : advecY_init_group    ! procedure devoted to advection along Y
+    use advecZ, only : advecZ_init_group    ! procedure devoted to advection along Z
+    use advec_variables ! contains info about solver parameters and others.
+    use cart_topology   ! Description of mesh and of mpi topology
+    use advec_common    ! some procedures common to advection along all directions
+
+    ! Input/Output
+    real(WP), intent(in)                          :: dt
+    integer, dimension(2), intent(in)             :: gs
+    real(WP), dimension(:,:,:), intent(in)        :: V_comp
+    real(WP), dimension(:,:,:), intent(inout)     :: vectX, vectY, vectZ
+    ! Other local variables
+    integer                                       :: i,j          ! indices of the current mesh point
+    integer, dimension(2)                         :: ind_group    ! index of the current group of lines (=(i,k) by default)
+    real(WP), dimension(mesh_sc%N_proc(line_dir),gs(1),gs(2))  :: p_pos_adim ! dimensionless particle positions
+    real(WP), dimension(mesh_sc%N_proc(line_dir),gs(1),gs(2))  :: p_V        ! particle velocities
+
+    ind_group = 0
+
+    do j = 1, mesh_sc%N_proc(gp_dir2), gs(2)
+        ind_group(2) = ind_group(2) + 1
+        ind_group(1) = 0
+        do i = 1, mesh_sc%N_proc(gp_dir1), gs(1)
+            ind_group(1) = ind_group(1) + 1
+
+            ! ===== Init particles =====
+            call advec_init_velo(V_comp, i, j, gs, p_pos_adim)
+            ! p_pos is used to store velocity at grid point
+            call AC_get_p_pos_adim(p_V, p_pos_adim, 0.5_WP*dt, mesh_sc%dx(line_dir), mesh_sc%N_proc(line_dir))
+            ! p_V = middle point position = position at middle point for RK2 scheme
+
+            ! ===== Advection =====
+            ! -- Compute velocity (with a RK2 scheme) --
+            ! Note that p_pos is used as velocity component storage
+            call AC_interpol_lin(line_dir, gs, ind_group, p_pos_adim, p_V)
+            ! p_v = velocity at middle point position
+            ! -- Push particles --
+            call AC_get_p_pos_adim(p_pos_adim, p_V, dt, mesh_sc%dx(line_dir), mesh_sc%N_proc(line_dir))
+            ! Now p_pos = particle position and p_V = particle velocity
+
+            ! ===== Remeshing =====
+            call advec_remesh(line_dir, ind_group, gs, p_pos_adim, p_V, i,j,vectX, dt)
+            call advec_remesh(line_dir, ind_group, gs, p_pos_adim, p_V, i,j,vectY, dt)
+            call advec_remesh(line_dir, ind_group, gs, p_pos_adim, p_V, i,j,vectZ, dt)
+
+        end do
+    end do
+
+end subroutine advec_vector_1D_basic
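+
+! For reference, the RK2 (middle-point) update performed in the loop above,
+! written per particle with x0 the (dimensionless) grid position and V the
+! velocity:
+!   x_mid = x0 + (dt/2) * V(x0)/dx      ! AC_get_p_pos_adim with 0.5_WP*dt
+!   x_new = x0 + dt * V(x_mid)/dx       ! AC_interpol_lin, then AC_get_p_pos_adim
+! The three remeshing calls then reuse the same particle positions and
+! velocities for vectX, vectY and vectZ, so the costly trajectory computation
+! is shared between the three components.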
+
+!> Vector advection along one direction - variant for cases with no communication
+!!    @param[in]        dt          = time step
+!!    @param[in]        gs          = size of the work item along the transverse direction
+!!    @param[in]        V_comp      = velocity along X (may be discretised on a bigger mesh than the vector)
+!!    @param[in,out]    vectX,vectY,vectZ = components of the vector field to advect
+!> @details
+!!   Works only for direction = X. Basic (and very simple) remeshing would just have to
+!! be added for the other directions.
+subroutine advec_vector_X_basic_no_com(dt, gs, V_comp, vectX, vectY, vectZ)
+
+    use advec, only : advec_init_velo, advec_remesh, line_dir, gp_dir1, gp_dir2
+    use advecX          ! Procedure specific to advection along X
+    use advec_common    ! Some procedures common to advection along all directions
+    use advec_variables ! contains info about solver parameters and others.
+    use cart_topology   ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    real(WP), intent(in)                          :: dt
+    integer, dimension(2), intent(in)             :: gs
+    real(WP), dimension(:,:,:), intent(in)        :: V_comp
+    real(WP), dimension(:,:,:), intent(inout)     :: vectX, vectY, vectZ
+    ! Other local variables
+    integer                                             :: j,k          ! indices of the current mesh point
+    integer, dimension(2)                               :: ind_group    ! index of the current group of lines (=(i,k) by default)
+    real(WP),dimension(mesh_sc%N_proc(line_dir),gs(1),gs(2)) :: p_pos_adim   ! dimensionless particle positions
+    real(WP),dimension(mesh_sc%N_proc(line_dir),gs(1),gs(2)) :: p_V          ! particle velocities
+
+    ind_group = 0
+
+    do k = 1, mesh_sc%N_proc(gp_dir2), gs(2)
+        ind_group(2) = ind_group(2) + 1
+        ind_group(1) = 0
+        do j = 1, mesh_sc%N_proc(gp_dir1), gs(1)
+            ind_group(1) = ind_group(1) + 1
+
+            ! ===== Init particles =====
+            ! p_pos is used to store velocity at grid point
+            call advec_init_velo(V_comp, j, k, gs, p_pos_adim)
+            ! p_V = middle point position = position at middle point for RK2 scheme
+            call AC_get_p_pos_adim(p_V, p_pos_adim, 0.5_WP*dt, mesh_sc%dx(line_dir), mesh_sc%N_proc(line_dir))
+
+            ! ===== Advection =====
+            ! -- Compute velocity (with a RK2 scheme): p_V = velocity at middle point position --
+            ! Note that p_pos is used as velocity component storage
+            call AC_interpol_lin_no_com(line_dir, gs, p_pos_adim, p_V)
+            ! p_v = velocity at middle point position
+            ! -- Push particles --
+            call AC_get_p_pos_adim(p_pos_adim, p_V, dt, mesh_sc%dx(line_dir), mesh_sc%N_proc(line_dir))
+            ! Now p_pos = particle position and p_V = particle velocity
+
+            ! ===== Remeshing =====
+            call advecX_remesh_no_com(ind_group, gs, p_pos_adim, p_V, j, k, vectX, dt)
+            call advecX_remesh_no_com(ind_group, gs, p_pos_adim, p_V, j, k, vectY, dt)
+            call advecX_remesh_no_com(ind_group, gs, p_pos_adim, p_V, j, k, vectZ, dt)
+
+        end do
+    end do
+
+end subroutine advec_vector_X_basic_no_com
+
+
+end module advec_Vect
diff --git a/HySoP/src/scalesReduced/particles/advec_common_group.F90 b/HySoP/src/scalesReduced/particles/advec_common_group.F90
new file mode 100644
index 0000000000000000000000000000000000000000..735d5457d5e5e228518510aaf0d491de6cb2ad01
--- /dev/null
+++ b/HySoP/src/scalesReduced/particles/advec_common_group.F90
@@ -0,0 +1,185 @@
+!USEFORTEST advec
+!> @addtogroup part
+!! @{
+!------------------------------------------------------------------------------
+!
+! MODULE: advec_common
+!
+!
+! DESCRIPTION:
+!> The module ``advec_common'' gathers functions and subroutines used to advect a scalar
+!! which are not specific to a direction
+!! @details
+!! This module gathers functions and routines used to advect a scalar which are not
+!! specific to a direction. This is a parallel implementation using MPI and
+!! the cartesian topology it provides. It also contains the variables common to
+!! the solver along each direction and other generic variables used for the
+!! advection based on the particle method.
+!!
+!! Except for testing purposes, this module is not supposed to be used by the
+!! main code but only by the other advection modules. More precisely, a final user
+!! must only use the generic "advec" module which contains all the interfaces to
+!! solve the advection equation with the particle method and to choose the
+!! remeshing formula, the dimensional splitting and everything else.
+!!
+!! The module "test_advec" can be used in order to validate the procedures
+!! embedded in this module.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advec_common
+
+    use precision_tools
+
+    ! Velocity interpolation at particle position
+    use advec_common_interpol ,only:AC_interpol_lin, AC_interpol_lin_no_com, &
+      & AC_interpol_plus, AC_interpol_plus_no_com
+    ! Particles remeshing
+    use advec_common_remesh,only: AC_setup_init,                &
+            & AC_remesh_setup_alongX, AC_remesh_setup_alongY, AC_remesh_setup_alongZ,&
+            & AC_remesh_lambda_group, AC_remesh_limit_lambda_group, AC_remesh_Mprime_group
+
+    implicit none
+
+    ! To get particle position - if particles are created everywhere
+    interface AC_get_p_pos_adim
+      module procedure AC_init_pos, AC_get_pos_V, AC_get_pos_other_mesh, AC_get_pos_other_mesh_big
+    end interface AC_get_p_pos_adim
+    public :: AC_get_p_pos_adim
+    private:: AC_init_pos
+    private:: AC_get_pos_V
+    private:: AC_get_pos_other_mesh
+    private:: AC_get_pos_other_mesh_big
+
+
+contains
+
+!> Init particle positions at mesh points
+!!   @param[out] p_pos = dimensionless particle positions
+subroutine AC_init_pos(p_pos)
+
+    real(WP), dimension(:,:,:), intent(out) :: p_pos
+
+    integer :: i2,i1,i_p
+
+    do i2 = 1, size(p_pos,3)
+      do i1 = 1, size(p_pos,2)
+        do i_p = 1, size(p_pos,1)
+          p_pos(i_p,i1,i2) = i_p
+        end do
+      end do
+    end do
+    !do i_p = 1, size(p_pos,1)
+    !  p_pos(i_p,:,:) = i_p
+    !end do
+
+end subroutine AC_init_pos
+
+
+!> Init particle positions (made dimensionless by dx) at initial position + dt*velocity
+!!   @param[out] p_pos  = dimensionless particle positions
+!!   @param[in]  p_V    = particle velocity
+!!   @param[in]  dt     = time step
+!!   @param[in]  dx_sc  = spatial step for the scalar
+!!   @param[in]  Np     = number of particles per line (= number of mesh points along the current direction)
+subroutine AC_get_pos_V(p_pos, p_V, dt, dx_sc, Np)
+
+    real(WP), dimension(:,:,:), intent(out) :: p_pos
+    real(WP), dimension(:,:,:), intent(in)  :: p_V
+    real(WP)                  , intent(in)  :: dt, dx_sc
+    integer                   , intent(in)  :: Np
+
+    integer :: i2,i1,i_p
+    real(WP):: coef
+
+    coef = dt/dx_sc
+    do i2 = 1, size(p_pos,3)
+      do i1 = 1, size(p_pos,2)
+        do i_p = 1, Np
+          p_pos(i_p,i1,i2) = i_p + coef*p_V(i_p,i1,i2)
+        end do
+      end do
+    end do
+    !do i_p = 1, size(p_pos,1)
+    !  p_pos(i_p,:,:) = i_p + coef*p_V(i_p,:,:)
+    !end do
+
+end subroutine AC_get_pos_V
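+
+! Tiny usage sketch (hypothetical values, illustration only): with dt = 0.1_WP
+! and dx_sc = 0.5_WP, coef = 0.2_WP, so a particle created at mesh point
+! i_p = 3 with velocity p_V = 1.0_WP gets
+!   p_pos(3,i1,i2) = 3 + 0.2_WP*1.0_WP = 3.2_WP
+! ie positions are expressed in units of the mesh spacing (dimensionless).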
+
+
+!> Init particle positions (made dimensionless by dx_V) at initial position +
+!! dt*velocity - use this variant if the velocity and scalar resolutions are different.
+!!   @param[out] p_pos  = dimensionless particle positions
+!!   @param[in]  p_V    = particle velocity
+!!   @param[in]  dt     = time step
+!!   @param[in]  dx_sc  = spatial step for the scalar
+!!   @param[in]  dx_V   = spatial step for the velocity
+!!   @param[in]  Np     = number of particles per line (= number of mesh points along the current direction)
+subroutine AC_get_pos_other_mesh(p_pos, p_V, dt, dx_sc, dx_V, Np)
+
+    real(WP), dimension(:,:,:), intent(out) :: p_pos
+    real(WP), dimension(:,:,:), intent(in)  :: p_V
+    real(WP)                  , intent(in)  :: dt, dx_sc, dx_V
+    integer                   , intent(in)  :: Np
+
+    integer :: i2,i1,i_p
+    real(WP):: coef1, coef2
+
+    coef1 = dx_sc/dx_V
+    coef2 = dt/dx_V
+    do i2 = 1, size(p_pos,3)
+      do i1 = 1, size(p_pos,2)
+        do i_p = 1, Np
+          p_pos(i_p,i1,i2) = (coef1*i_p) + (coef2*p_V(i_p,i1,i2))
+        end do
+      end do
+    end do
+    !do i_p = 1, size(p_pos,1)
+    !  p_pos(i_p,:,:) = (coef1*i_p) + (coef2*p_V(i_p,:,:))
+    !end do
+
+end subroutine AC_get_pos_other_mesh
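+
+! Hypothetical numeric check of the two-mesh rescaling above: with
+! dx_sc = 0.5_WP and dx_V = 1.0_WP (velocity grid twice as coarse),
+! coef1 = 0.5_WP, so the scalar point i_p = 4 sits at 2.0_WP in velocity-grid
+! units, and the dt*velocity displacement is likewise expressed in dx_V units
+! through coef2 = dt/dx_V.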
+
+
+!> Init particle positions (made dimensionless by dx_V) at initial position +
+!! dt*velocity - use this variant if the velocity and scalar resolutions are different
+!! and if V_comp contains not only the velocity for the current work item.
+!!   @param[out] p_pos  = dimensionless particle positions
+!!   @param[in]  p_V    = particle velocity
+!!   @param[in]  dt     = time step
+!!   @param[in]  dx_sc  = spatial step for the scalar
+!!   @param[in]  dx_V   = spatial step for the velocity
+!!   @param[in]  id1,id2= coordinates of the current work item
+!!   @param[in]  Np     = number of particles per line (= number of mesh points along the current direction)
+subroutine AC_get_pos_other_mesh_big(p_pos, p_V, dt, dx_sc, dx_V, Np, id1, id2)
+
+    real(WP), dimension(:,:,:), intent(out) :: p_pos
+    real(WP), dimension(:,:,:), intent(in)  :: p_V
+    real(WP)                  , intent(in)  :: dt, dx_sc, dx_V
+    integer                   , intent(in)  :: id1, id2, Np
+
+    integer :: i2,i1,i_p, idir1, idir2
+    real(WP):: coef1, coef2
+
+    idir1 = id1 - 1
+    idir2 = id2 - 1
+
+    coef1 = dx_sc/dx_V
+    coef2 = dt/dx_V
+
+    do i2 = 1, size(p_pos,3)
+      do i1 = 1, size(p_pos,2)
+        do i_p = 1, Np
+          p_pos(i_p,i1,i2) = (coef1*i_p) + (coef2*p_V(i_p,i1+idir1,i2+idir2))
+        end do
+      end do
+    end do
+    !do i_p = 1, size(p_pos,1)
+    !  p_pos(i_p,:,:) = (coef1*i_p) + (coef2*p_V(i_p,:,:))
+    !end do
+
+end subroutine AC_get_pos_other_mesh_big
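+
+! Usage note (sketch): this variant reads the velocity with an offset,
+! p_V(i_p, i1+id1-1, i2+id2-1), so p_V may hold the velocity of a block larger
+! than the current work item; choosing (id1,id2) = (1,1) reduces it to the
+! behaviour of AC_get_pos_other_mesh.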
+
+end module advec_common
diff --git a/HySoP/src/scalesReduced/particles/advec_common_interpol.F90 b/HySoP/src/scalesReduced/particles/advec_common_interpol.F90
new file mode 100644
index 0000000000000000000000000000000000000000..c967e14972d684029acbc189a35c4c4aa8dd200d
--- /dev/null
+++ b/HySoP/src/scalesReduced/particles/advec_common_interpol.F90
@@ -0,0 +1,1298 @@
+!USEFORTEST advec
+!> @addtogroup part
+!! @{
+!------------------------------------------------------------------------------
+!
+! MODULE: advec_common_interpol
+!
+!
+! DESCRIPTION:
+!> The module ``advec_common_interpol'' gathers functions and subroutines used to interpolate
+!! some quantities (velocity for instance) at particle positions. These tools are not specific to a direction.
+!! @details
+!! This module gathers functions and routines used to interpolate some field
+!! at particle positions. These subroutines are not specific to a direction.
+!! This is a parallel implementation using MPI and the cartesian topology it
+!! provides.
+!!
+!! Except for testing purposes, this module is not supposed to be used by the
+!! main code but only by the other advection modules. More precisely, a final user
+!! must only use the generic "advec" module which contains all the interfaces to
+!! solve the advection equation with the particle method and to choose the
+!! remeshing formula, the dimensional splitting and everything else. Except for
+!! testing purposes, the other advection modules only have to include
+!! "advec_common".
+!!
+!! The module "test_advec" can be used in order to validate the procedures
+!! embedded in this module.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advec_common_interpol
+
+    use structure_tools
+    use advec_abstract_proc
+    use mpi, only:MPI_INTEGER, MPI_ANY_SOURCE, MPI_STATUS_SIZE
+    implicit none
+
+
+    ! Information about the particles and their blocks
+    public
+
+
+    ! ===== Public procedures =====
+    !----- To interpolate velocity -----
+    public                        :: AC_interpol_lin
+    public                        :: AC_interpol_plus
+    public                        :: AC_interpol_lin_no_com
+    public                        :: AC_interpol_determine_communication
+
+    ! ===== Public variables =====
+
+    ! ===== Private variables =====
+
+
+contains
+
+! ===== Public procedure =====
+
+! ==================================================================================
+! ====================     Compute particle velocity (RK2)      ====================
+! ==================================================================================
+
+!> Interpolate the velocity field used in a RK2 scheme for particle advection -
+!! version for a group of (more than one) lines
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        gs          = size of a group (ie number of lines it gathers along the two other directions)
+!!    @param[in]        ind_group   = coordinates of the current group of lines
+!!    @param[in,out]    p_inter     = dimensionless particle positions on input; returns the interpolated field on output
+!!    @param[in]        V_comp      = field to interpolate at particle positions
+!! @details
+!!    A RK2 scheme is used to advect the particles: the middle-point scheme. An
+!!    intermediate position "p_pos_bis(i) = p_pos(i) + V(i)*dt/2" is computed, and then
+!!    the numerical velocity of each particle is computed as the interpolation of V at
+!!    this point. This field is used to advect the particles at second order in time:
+!!    p_pos(t+dt, i) = p_pos(i) + p_V(i).
+!!    The group line index is used to ensure the uniqueness of each mpi message tag.
+!!    The interpolation is done for a group of lines, allowing communications to be
+!!    pooled. Considering a group of Na x Nb lines, the communications performed
+!!    by this algorithm are around (Na x Nb) times bigger than with the algorithm
+!!    which works on a single line, but also around (Na x Nb) times less frequent.
+subroutine AC_interpol_lin(direction, gs, ind_group, V_comp, p_inter)
+
+    ! This code involves a copy of V_comp. It would be possible to use the 3D velocity field directly, but even in
+    ! such a code a memory copy would still be needed to send the velocity field to other processes: mpi sends
+    ! contiguous memory values.
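+    ! Hedged sketch of the linear interpolation computed further below: for a
+    ! particle at dimensionless position p, with pos = floor(p) and weight
+    ! w = p - pos, the interpolated value is
+    !   V_interp = (1-w)*V(pos) + w*V(pos+1)
+    ! where V(pos) and V(pos+1) are reached either through the Vm/Vp pointers
+    ! or through V_buffer when those points live on a neighbouring process.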
+
+    use cart_topology   ! info about mesh and mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    integer, intent(in)                             :: direction    ! current direction
+    integer, dimension(2),intent(in)                :: gs           ! group size
+    integer, dimension(2), intent(in)               :: ind_group
+    real(WP), dimension(:,:,:), intent(inout)       :: p_inter
+    real(WP), dimension(:,:,:),intent(in),target    :: V_comp
+#ifdef BLOCKING_SEND_PLUS
+    real(WP)                                                    :: weight       ! interpolation weight storage
+#else
+    type(real_pter),dimension(mesh_sc%N_proc(direction),gs(1),gs(2))    :: Vp, Vm       ! Velocity on previous and next mesh point
+#endif
+    real(WP), dimension(:), allocatable, target                 :: V_buffer     ! velocity buffer for positions outside of the local subdomain
+    integer, dimension(:), allocatable                          :: pos_in_buffer! buffer size
+    integer , dimension(gs(1), gs(2))           :: rece_ind_min ! minimal index of the mesh points where I need the velocity value (on my local subdomains)
+    integer , dimension(gs(1), gs(2))           :: rece_ind_max ! maximal index of the mesh points where I need the velocity value (on my local subdomains)
+    integer                                     :: ind, ind_com ! indices
+    integer                                     :: i1, i2       ! indices in the lines group
+    integer                                     :: pos, pos_old ! indices of the mesh point which precedes the particle position
+    integer                                     :: proc_gap, gap! distance between my (mpi) coordinate and the coordinate of the
+                                                                ! process associated with a given position
+    integer                                     :: proc_end     ! final index of the process associated with the current pos
+    logical, dimension(2)                       :: myself
+    integer, dimension(:), allocatable          :: send_carto   ! cartography of what I have to send
+    integer                                     :: ind_1Dtable  ! index of my current position inside a one-dimensional table
+    integer                                     :: ind_for_i1   ! where to read the first coordinate (i1) of the current line inside the cartography
+    real(WP), dimension(:), allocatable         :: send_buffer  ! to store what I have to send (on a contiguous way)
+    integer, dimension(gs(1),gs(2),2)           :: rece_gap     ! distance between me and the processes which send me information
+    integer, dimension(2 , 2)                   :: send_gap     ! distance between me and the processes to which I send information
+    integer, dimension(2)                       :: rece_gap_abs ! min (resp max) value of rece_gap(:,:,i) with i=1 (resp 2)
+    integer                                     :: com_size     ! size of message send/receive
+    integer, dimension(:), allocatable          :: size_com     ! size of message send/receive
+    integer                                     :: min_size     ! minimal size of cartography(:,proc_gap)
+    integer                                     :: max_size     ! maximal size of cartography(:,proc_gap)
+    integer                                     :: tag          ! mpi message tag
+    integer, dimension(:), allocatable          :: tag_proc     ! mpi message tag
+    integer                                     :: ierr         ! mpi error code
+#ifndef BLOCKING_SEND
+   integer, dimension(:), allocatable          :: s_request    ! mpi communication request (handle) of nonblocking send
+#endif
+    integer, dimension(:), allocatable          :: s_request_bis! mpi communication request (handle) of nonblocking send
+    integer, dimension(:), allocatable          :: rece_request ! mpi communication request (handle) of nonblocking receive
+    integer, dimension(MPI_STATUS_SIZE)         :: rece_status  ! mpi status (for mpi_wait)
+    integer, dimension(:,:), allocatable        :: cartography  ! cartography(proc_gap) contains the set of line indices in the block for which the
+                                                                ! current process requires data from proc_gap, and for each of these lines the range
+                                                                ! of mesh points from which it requires the velocity values.
+
+    ! -- Initialisation --
+#ifndef BLOCKING_SEND_PLUS
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            do ind = 1, mesh_sc%N_proc(direction)
+                nullify(Vp(ind,i1,i2)%pter)
+                nullify(Vm(ind,i1,i2)%pter)
+            end do
+        end do
+    end do
+#endif
+
+    ! Compute the range of the set of points where I need the velocity value
+    rece_ind_min = floor(p_inter(1,:,:))
+    rece_ind_max = floor(p_inter(mesh_sc%N_proc(direction),:,:)) + 1
+
+    ! ===== Exchange velocity field if needed =====
+    ! It uses non-blocking messages to overlap computations with the communication process
+    ! -- What do I have to communicate? --
+    rece_gap(:,:,1) = floor(real(rece_ind_min-1, WP)/mesh_sc%N_proc(direction))
+    rece_gap(:,:,2) = floor(real(rece_ind_max-1, WP)/mesh_sc%N_proc(direction))
+    rece_gap_abs(1) = minval(rece_gap(:,:,1))
+    rece_gap_abs(2) = maxval(rece_gap(:,:,2))
+    max_size = 2 + gs(2)*(2+3*gs(1))
+    allocate(cartography(max_size,rece_gap_abs(1):rece_gap_abs(2)))
+    call AC_interpol_determine_communication(direction, ind_group, gs, send_gap,  &
+    & rece_gap, rece_gap_abs, cartography)
+
+    ! -- Send messages about what I want --
+    allocate(s_request_bis(rece_gap_abs(1):rece_gap_abs(2)))
+    allocate(size_com(rece_gap_abs(1):rece_gap_abs(2)))
+    allocate(tag_proc(rece_gap_abs(1):rece_gap_abs(2)))
+    min_size = 2 + gs(2)
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+        if (neighbors(direction,proc_gap) /= D_rank(direction)) then
+            cartography(1,proc_gap) = 0
+            ! Use the cartography to know which lines are concerned
+            size_com(proc_gap) = cartography(2,proc_gap)
+            ! Range I want - store into the cartography
+            gap = proc_gap*mesh_sc%N_proc(direction)
+            ! Position in cartography(:,proc_gap) of the current i1 indice
+            ind_for_i1 = min_size
+            do i2 = 1, gs(2)
+                do ind = ind_for_i1+1, ind_for_i1 + cartography(2+i2,proc_gap), 2
+                    do i1 = cartography(ind,proc_gap), cartography(ind+1,proc_gap)
+                        ! Interval start from:
+                        cartography(size_com(proc_gap)+1,proc_gap) = max(rece_ind_min(i1,i2), gap+1) ! fortran => indices start from 1
+                        ! and ends at:
+                        cartography(size_com(proc_gap)+2,proc_gap) = min(rece_ind_max(i1,i2), gap+mesh_sc%N_proc(direction))
+                        ! update number of element to receive
+                        cartography(1,proc_gap) = cartography(1,proc_gap) &
+                                    & + cartography(size_com(proc_gap)+2,proc_gap) &
+                                    & - cartography(size_com(proc_gap)+1,proc_gap) + 1
+                        size_com(proc_gap) = size_com(proc_gap)+2
+                    end do
+                end do
+                ind_for_i1 = ind_for_i1 + cartography(2+i2,proc_gap)
+            end do
+            ! Tag = concatenation of (rank+1), ind_group(1), ind_group(2), direction and a unique id.
+            tag_proc(proc_gap) = compute_tag(ind_group, tag_velo_range, direction, proc_gap)
+            ! Send message
+#ifdef PART_DEBUG
+            if(size_com(proc_gap)>max_size) then
+                print*, 'rank = ', cart_rank, ' -- wrong size for the cartography to send'
+                print*, 'cartography size = ', size_com(proc_gap), ' bigger than the theoretical size ', &
+                    & max_size, ' and cartography = ', cartography(:,proc_gap)
+            end if
+#endif
+            call mpi_ISsend(cartography(1,proc_gap), size_com(proc_gap), MPI_INTEGER,   &
+                & neighbors(direction,proc_gap), tag_proc(proc_gap), D_comm(direction), &
+                & s_request_bis(proc_gap),ierr)
+        end if
+    end do
+
+
+    ! -- Non blocking reception of the velocity field --
+    ! Allocate the pos_in_buffer to compute V_buffer size and to be able to
+    ! allocate it.
+    allocate(pos_in_buffer(rece_gap_abs(1):rece_gap_abs(2)))
+    pos_in_buffer(rece_gap_abs(1)) = 1
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)-1
+        pos_in_buffer(proc_gap+1)= pos_in_buffer(proc_gap) + cartography(1,proc_gap)
+    end do
+    allocate(V_buffer(pos_in_buffer(rece_gap_abs(2)) &
+                & + cartography(1,rece_gap_abs(2))))
+    V_buffer = 0
+    allocate(rece_request(rece_gap_abs(1):rece_gap_abs(2)))
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+        if (neighbors(direction,proc_gap) /= D_rank(direction)) then
+            ! IIa - Compute reception tag
+            tag = compute_tag(ind_group, tag_velo_V, direction, -proc_gap)
+            ! IIb - Receive message
+            call mpi_Irecv(V_buffer(pos_in_buffer(proc_gap)), cartography(1,proc_gap), MPI_REAL_WP, &
+                    & neighbors(direction,proc_gap), tag, D_comm(direction), rece_request(proc_gap), ierr)
+        end if
+    end do
+
+    ! -- Send the velocity field to the processes which need it --
+#ifndef BLOCKING_SEND
+   allocate(s_request(send_gap(1,1):send_gap(1,2)))
+#endif
+    allocate(send_carto(max_size))
+! XXX Todo: count the number of messages to receive, then process them in the
+! order they arrive via MPI_ANY_SOURCE? But then rank and coordinates would have
+! to be linked... which means adding a call to mpi_cart_coords... or sending the
+! rank inside the cartography!!
+! To be seen which is best.
+    do proc_gap = send_gap(1,1), send_gap(1,2)
+        if (neighbors(direction,proc_gap) /= D_rank(direction)) then
+            ! I - Receive messages about what I have to send
+            ! Ia - Compute reception tag = concatenation of (rank+1), ind_group(1), ind_group(2), direction and a unique id.
+            tag = compute_tag(ind_group, tag_velo_range, direction, -proc_gap)
+            ! Ib - Receive the message
+            call mpi_recv(send_carto(1), max_size, MPI_INTEGER, neighbors(direction,proc_gap), &
+              & tag, D_comm(direction), rece_status, ierr)
+            ! II - Send it
+            ! IIa - Create send buffer
+            allocate(send_buffer(send_carto(1)))
+            gap = proc_gap*mesh_sc%N_proc(direction)
+            com_size = 0
+            ind_1Dtable = send_carto(2)
+            ! Position in cartography(:,proc_gap) of the current i1 index
+            ind_for_i1 = min_size
+            do i2 = 1, gs(2)
+                do ind = ind_for_i1+1, ind_for_i1 + send_carto(2+i2), 2
+                    do i1 = send_carto(ind), send_carto(ind+1)
+                        do ind_com = send_carto(ind_1Dtable+1)+gap, send_carto(ind_1Dtable+2)+gap ! index inside the current line
+                            com_size = com_size + 1
+                            send_buffer(com_size) = V_comp(ind_com, i1,i2)
+                        end do
+                        ind_1Dtable = ind_1Dtable + 2
+                    end do
+                end do
+                ind_for_i1 = ind_for_i1 + send_carto(2+i2)
+            end do
+            ! IIa_bis - check correctness
+#ifdef PART_DEBUG
+            if(com_size/=send_carto(1)) then
+                print*, 'rank = ', cart_rank, ' -- wrong size for the velocity field to send'
+                print*, 'computed size = ', com_size, ' does not match the received size ', &
+                    & send_carto(1), ' and carto = ', send_carto(:)
+            end if
+#endif
+            ! IIb - Compute send tag
+            tag = compute_tag(ind_group, tag_velo_V, direction, proc_gap)
+            ! IIc - Send message
+#ifdef BLOCKING_SEND
+            call mpi_Send(send_buffer(1), com_size, MPI_REAL_WP,  &
+                    & neighbors(direction,proc_gap), tag, D_comm(direction),&
+                    & ierr)
+#else
+           call mpi_ISend(send_buffer(1), com_size, MPI_REAL_WP,  &
+                   & neighbors(direction,proc_gap), tag, D_comm(direction),&
+                   & s_request(proc_gap), ierr)
+#endif
+            deallocate(send_buffer)
+        end if
+    end do
+    deallocate(send_carto)
+
+    !-- Free some ISsend buffers and some arrays --
+! XXX Todo: prefer a call to MPI_WAITALL combined with initializing s_request_bis
+! to MPI_REQUEST_NULL, and remove both the loop AND the if.
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+        if (neighbors(direction,proc_gap) /= D_rank(direction)) then
+            call MPI_WAIT(s_request_bis(proc_gap),rece_status,ierr)
+        end if
+    end do
+    deallocate(s_request_bis)
+    deallocate(cartography) ! We do not need it anymore
+    deallocate(tag_proc)
+    deallocate(size_com)
+
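+    ! Note on the compile-time variants: with BLOCKING_SEND_PLUS, receptions
+    ! are completed right away (mpi_wait below) and the interpolation loop
+    ! computes the weighted velocity on the fly; without it, the loop only
+    ! sets the Vm/Vp pointers and the weighted combination is evaluated once
+    ! all non-blocking receptions have completed.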
+#ifdef BLOCKING_SEND_PLUS
+    ! -- Compute the interpolated velocity --
+    ! Check that communications are done
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+        if (neighbors(direction,proc_gap)/=D_rank(direction)) then
+            call mpi_wait(rece_request(proc_gap), rece_status, ierr)
+        end if
+    end do
+    deallocate(rece_request)
+#endif
+
+    ! ===== Compute the interpolated velocity =====
+    ! -- Compute the interpolation weight and update the pointers Vp and Vm --
+    pos_in_buffer = pos_in_buffer - 1
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            ! Initialisation of the recurrence process
+            ind = 1
+            pos = floor(p_inter(ind,i1,i2))
+#ifndef BLOCKING_SEND_PLUS
+            p_inter(ind,i1,i2) = p_inter(ind,i1,i2)-pos
+#else
+            weight = p_inter(ind,i1,i2)-pos
+#endif
+            ! Vm = V(pos)
+            proc_gap = floor(real(pos-1, WP)/mesh_sc%N_proc(direction))
+            if (neighbors(direction,proc_gap) == D_rank(direction)) then
+#ifndef BLOCKING_SEND_PLUS
+              Vm(ind,i1,i2)%pter => V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+#else
+              p_inter(ind,i1,i2) = (1._WP-weight)*V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+#endif
+              myself(1) = .true.
+            else
+              pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1  ! XXX New version only
+#ifndef BLOCKING_SEND_PLUS
+              Vm(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap))
+#else
+              p_inter(ind,i1,i2) = (1._WP-weight)*V_buffer(pos_in_buffer(proc_gap))
+#endif
+              myself(1) = .false.
+            end if
+            ! Vp = V(pos+1)
+            gap = floor(real(pos+1-1, WP)/mesh_sc%N_proc(direction))
+            if (neighbors(direction,gap) == D_rank(direction)) then
+#ifndef BLOCKING_SEND_PLUS
+              Vp(ind,i1,i2)%pter => V_comp(pos+1-gap*mesh_sc%N_proc(direction), i1,i2)
+#else
+              p_inter(ind,i1,i2) = p_inter(ind,i1,i2) + weight*V_comp(pos+1-gap*mesh_sc%N_proc(direction), i1,i2)
+#endif
+            else
+              pos_in_buffer(gap) = pos_in_buffer(gap) + 1  ! XXX New version only
+#ifndef BLOCKING_SEND_PLUS
+              Vp(ind,i1,i2)%pter => V_buffer(pos_in_buffer(gap))
+#else
+              p_inter(ind,i1,i2) = p_inter(ind,i1,i2) + weight*V_buffer(pos_in_buffer(gap))
+#endif
+            end if
+            pos_old = pos
+            proc_end = (proc_gap+1)*mesh_sc%N_proc(direction)
+            myself(2) = (neighbors(direction,proc_gap+1) == D_rank(direction))
+
+
+            ! XXX New version XXX
+            ! Following indices: new version
+            ind = 2
+            if (ind<=mesh_sc%N_proc(direction)) pos = floor(p_inter(ind,i1,i2))
+            do while (ind<=mesh_sc%N_proc(direction))
+              !pos = floor(p_inter(ind,i1,i2))
+              if(myself(1)) then
+                ! -- Inside the current block, it is always the same --
+                do while ((pos<proc_end).and.(ind<mesh_sc%N_proc(direction)))
+                  ! Computation for current step
+#ifndef BLOCKING_SEND_PLUS
+                  p_inter(ind,i1,i2) = p_inter(ind,i1,i2)-pos
+                  Vm(ind,i1,i2)%pter => V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+                  Vp(ind,i1,i2)%pter => V_comp(pos+1-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+#else
+                  !weight = p_inter(ind,i1,i2)-pos
+                  !p_inter = weight*Vp + (1-weight)*Vm = weight*(Vp-Vm) + Vm
+                  p_inter(ind,i1,i2) = (p_inter(ind,i1,i2)-pos)*(V_comp(pos+1-proc_gap*mesh_sc%N_proc(direction), i1,i2) &
+                    & - V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)) + V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+#endif
+                  ! Prepare next step
+                  pos_old = pos
+                  ind = ind + 1
+                  pos = floor(p_inter(ind,i1,i2))
+                end do ! ((pos<proc_end).and.(ind<mesh_sc%N_proc(direction)))
+                ! -- When we are exactly on the subdomain transition --
+                do while ((pos==proc_end).and.(ind<mesh_sc%N_proc(direction)))
+#ifndef BLOCKING_SEND_PLUS
+                  p_inter(ind,i1,i2) = p_inter(ind,i1,i2)-pos
+                  ! Vm is in the same sub-domain
+                  Vm(ind,i1,i2)%pter => V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+#endif
+                  ! Vp is in the next one (proc_gap+1)
+                  if(myself(2)) then
+#ifndef BLOCKING_SEND_PLUS
+                    Vp(ind,i1,i2)%pter => V_comp(pos+1-(proc_gap+1)*mesh_sc%N_proc(direction), i1,i2)
+#else
+                    p_inter(ind,i1,i2) = ((p_inter(ind,i1,i2)-pos)*         &
+                      & (V_comp(pos+1-(proc_gap+1)*mesh_sc%N_proc(direction), i1,i2) &
+                      & - V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2))    )&
+                      &  + V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+#endif
+                  else
+                    ! If pos = pos_old, we must have pos_in_buffer(proc_gap+1) += 0 (no change)
+                    ! Else pos>pos_old, and we must have pos_in_buffer(proc_gap+1) += 1
+                    ! We use that min(1,pos-pos_old) = 0 if pos=pos_old, 1 otherwise
+                    pos_in_buffer(proc_gap+1) = pos_in_buffer(proc_gap+1) + min(1,pos-pos_old)
+#ifndef BLOCKING_SEND_PLUS
+                    Vp(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap+1))
+#else
+                    p_inter(ind,i1,i2) = ((p_inter(ind,i1,i2)-pos)*(V_buffer(pos_in_buffer(proc_gap+1)) &
+                    & - V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2))) + V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+#endif
+                  end if
+                  ! Prepare next step
+                  pos_old = pos
+                  ind = ind + 1
+                  pos = floor(p_inter(ind,i1,i2))
+                end do
+                ! -- When we reach the end of the sub-domain OR the end of the particle line --
+                if (pos>proc_end) then  ! Change of subdomain
+                  ! We have reached the next subdomain => update values
+                  proc_gap = floor(real(pos-1, WP)/mesh_sc%N_proc(direction)) ! "proc_gap = proc_gap + 1" does not work if N_proc = 1 and pos-pos_old = 2.
+                  myself(1) = (neighbors(direction,proc_gap) == D_rank(direction)) ! For the same reason as the line just above, we do not use "myself(1) = myself(2)"
+                  proc_end = (proc_gap+1)*mesh_sc%N_proc(direction)
+                  myself(2) = (neighbors(direction,proc_gap+1) == D_rank(direction))
+                  ! ... and go on with the next loop!
+                else ! ind == N_proc and no change of subdomain
+#ifndef BLOCKING_SEND_PLUS
+                  ! Computation for current step
+                  p_inter(ind,i1,i2) = p_inter(ind,i1,i2)-pos
+                  ! Vm
+                  Vm(ind,i1,i2)%pter => V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+#endif
+                  ! Vp
+                  if(pos<proc_end) then
+#ifndef BLOCKING_SEND_PLUS
+                    Vp(ind,i1,i2)%pter => V_comp(pos+1-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+#else
+                    p_inter(ind,i1,i2) = (p_inter(ind,i1,i2)-pos)*(V_comp(pos+1-proc_gap*mesh_sc%N_proc(direction), i1,i2) &
+                      & - V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)) + V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+#endif
+                  else ! pos+1 is in the next subdomain: use the same algorithm as lines 377-390
+                    if(myself(2)) then
+#ifndef BLOCKING_SEND_PLUS
+                      Vp(ind,i1,i2)%pter => V_comp(pos+1-(proc_gap+1)*mesh_sc%N_proc(direction), i1,i2)
+#else
+                      p_inter(ind,i1,i2) = (p_inter(ind,i1,i2)-pos)*(V_comp(pos+1-(proc_gap+1)*mesh_sc%N_proc(direction), i1,i2) &
+                        & - V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)) + V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+#endif
+                    else
+                      ! If pos = pos_old, we must have pos_in_buffer(proc_gap+1) += 0 (no change)
+                      ! Else pos>pos_old, and we must have pos_in_buffer(proc_gap+1) += 1
+                      ! We use that min(1,pos-pos_old) = 0 if pos=pos_old, 1 otherwise
+                      pos_in_buffer(proc_gap+1) = pos_in_buffer(proc_gap+1) + min(1,pos-pos_old)
+#ifndef BLOCKING_SEND_PLUS
+                      Vp(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap+1))
+#else
+                      p_inter(ind,i1,i2) = (p_inter(ind,i1,i2)-pos)*(V_buffer(pos_in_buffer(proc_gap+1)) &
+                        & - V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)) + V_comp(pos-proc_gap*mesh_sc%N_proc(direction), i1,i2)
+#endif
+                    end if
+                  end if
+                  ! Go to the next (i1,i2) value: ind must be greater than N_proc
+                  ind = ind +1
+                end if
+              else ! => not myself(1)
+                ! -- Inside the current block, it is always the same --
+                do while ((pos<proc_end).and.(ind<mesh_sc%N_proc(direction)))
+                  ! Computation for current step
+                  pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + pos-pos_old
+#ifndef BLOCKING_SEND_PLUS
+                  p_inter(ind,i1,i2) = p_inter(ind,i1,i2)-pos
+                  Vm(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap)-1)
+                  Vp(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap))
+#else
+                  p_inter(ind,i1,i2) = ((p_inter(ind,i1,i2)-pos)*(V_buffer(pos_in_buffer(proc_gap)) &
+                      & - V_buffer(pos_in_buffer(proc_gap)-1))) + V_buffer(pos_in_buffer(proc_gap)-1)
+#endif
+                  ! Prepare next step
+                  pos_old = pos
+                  ind = ind + 1
+                  pos = floor(p_inter(ind,i1,i2))
+                end do
+                ! -- When we are exactly on the subdomain transition --
+                do while ((pos==proc_end).and.(ind<mesh_sc%N_proc(direction)))
+                  ! If pos = pos_old, we must have  pos_in_buffer(proc_gap) += 0
+                  !                             and pos_in_buffer(proc_gap+1) += 0 (no changes)
+                  ! Else pos>pos_old, we must have pos_in_buffer(proc_gap) += (pos-pos_old -1)
+                  !                             and pos_in_buffer(proc_gap+1) += 1
+                  ! We use max(0,pos-pos_old-1) = 0 if pos=pos_old, (pos-pos_old-1) otherwise,
+                  !    and min(1,pos-pos_old)   = 0 if pos=pos_old, 1 otherwise
+                  ! Vm is in the same sub-domain
+                  pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + max(0,pos-pos_old-1)
+#ifndef BLOCKING_SEND_PLUS
+                 p_inter(ind,i1,i2) = p_inter(ind,i1,i2)-pos
+                 Vm(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap))
+#endif
+                  ! Vp is in the next one
+                  if(myself(2)) then
+#ifndef BLOCKING_SEND_PLUS
+                    Vp(ind,i1,i2)%pter => V_comp(pos+1-(proc_gap+1)*mesh_sc%N_proc(direction), i1,i2)
+#else
+                    p_inter(ind,i1,i2) = ((p_inter(ind,i1,i2)-pos)* &
+                      & (V_comp(pos+1-(proc_gap+1)*mesh_sc%N_proc(direction), i1,i2)   &
+                      & - V_buffer(pos_in_buffer(proc_gap))              ) )&
+                      & + V_buffer(pos_in_buffer(proc_gap))
+#endif
+                  else
+                    pos_in_buffer(proc_gap+1) = pos_in_buffer(proc_gap+1) + min(1,pos-pos_old)
+#ifndef BLOCKING_SEND_PLUS
+                    Vp(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap+1))
+#else
+                    p_inter(ind,i1,i2) = ((p_inter(ind,i1,i2)-pos)* &
+                      & (V_buffer(pos_in_buffer(proc_gap+1))      &
+                      & - V_buffer(pos_in_buffer(proc_gap)) )    )&
+                      & + V_buffer(pos_in_buffer(proc_gap))
+#endif
+                  end if
+                  ! Prepare next step
+                  pos_old = pos
+                  ind = ind + 1
+                  pos = floor(p_inter(ind,i1,i2))
+                end do
+                ! -- When we reach the end of the sub-domain OR the end of the particle line --
+                if (pos>proc_end) then  ! Change of subdomain
+                  ! We have reached the next subdomain => update values
+                  proc_gap = floor(real(pos-1, WP)/mesh_sc%N_proc(direction)) ! "proc_gap = proc_gap + 1" does not work if N_proc = 1 and pos-pos_old = 2.
+                  myself(1) = (neighbors(direction,proc_gap) == D_rank(direction)) ! For the same reason as the line just above, we do not use "myself(1) = myself(2)"
+                  proc_end = (proc_gap+1)*mesh_sc%N_proc(direction)
+                  myself(2) = (neighbors(direction,proc_gap+1) == D_rank(direction))
+                  ! ... and go on with the next loop!
+                else ! ind == N_proc and no change of subdomain
+                  ! Computation for current step
+#ifndef BLOCKING_SEND_PLUS
+                 p_inter(ind,i1,i2) = p_inter(ind,i1,i2)-pos
+#endif
+                  if (pos<proc_end) then
+                    pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + pos-pos_old
+#ifndef BLOCKING_SEND_PLUS
+                    Vm(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap)-1)
+                    Vp(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap))
+#else
+                    p_inter(ind,i1,i2) = ((p_inter(ind,i1,i2)-pos)*(V_buffer(pos_in_buffer(proc_gap)) &
+                        & - V_buffer(pos_in_buffer(proc_gap)-1))) + V_buffer(pos_in_buffer(proc_gap)-1)
+#endif
+                  else ! pos=proc_end : same as lines 440 to 462
+                    pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + max(0,pos-pos_old-1)
+#ifndef BLOCKING_SEND_PLUS
+                    Vm(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap))
+#endif
+                    ! Vp is in the next one
+                    if(myself(2)) then
+#ifndef BLOCKING_SEND_PLUS
+                      Vp(ind,i1,i2)%pter => V_comp(pos+1-(proc_gap+1)*mesh_sc%N_proc(direction), i1,i2)
+#else
+                      p_inter(ind,i1,i2) = ((p_inter(ind,i1,i2)-pos)*(V_comp(pos+1-(proc_gap+1)*mesh_sc%N_proc(direction), i1,i2) &
+                        & - V_buffer(pos_in_buffer(proc_gap)))) + V_buffer(pos_in_buffer(proc_gap))
+#endif
+                    else
+                      pos_in_buffer(proc_gap+1) = pos_in_buffer(proc_gap+1) + min(1,pos-pos_old)
+#ifndef BLOCKING_SEND_PLUS
+                      Vp(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap+1))
+#else
+                      p_inter(ind,i1,i2) = ((p_inter(ind,i1,i2)-pos)*(V_buffer(pos_in_buffer(proc_gap+1)) &
+                        & - V_buffer(pos_in_buffer(proc_gap)))) + V_buffer(pos_in_buffer(proc_gap))
+#endif
+                    end if
+                  end if
+                  ! Go to the next (i1,i2) value: ind must be greater than N_proc
+                  ind = ind +1
+                end if  ! pos>proc_end
+              end if ! myself(1)
+            end do ! while (ind<=mesh_sc%N_proc(direction))
+
+        end do ! loop on first coordinate (i1) of a line inside the block of line
+    end do ! loop on second coordinate (i2) of a line inside the block of line
+
+    deallocate(pos_in_buffer)   ! We do not need it anymore
+
+#ifndef BLOCKING_SEND_PLUS
+    ! -- Compute the interpolated velocity --
+    ! Check that communications are done
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+        if (neighbors(direction,proc_gap)/=D_rank(direction)) then
+            call mpi_wait(rece_request(proc_gap), rece_status, ierr)
+        end if
+    end do
+    deallocate(rece_request)
+#endif
+
+    ! Then compute the field
+#ifndef BLOCKING_SEND_PLUS
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            do ind = 1, mesh_sc%N_proc(direction)
+                p_inter(ind,i1,i2) = p_inter(ind,i1,i2)*Vp(ind,i1,i2)%pter + (1._WP-p_inter(ind,i1,i2))*Vm(ind,i1,i2)%pter
+            end do
+        end do
+    end do
+#endif
+
+
+    ! ===== Free memory =====
+    ! -- Pointers --
+#ifndef BLOCKING_SEND_PLUS
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            do ind = 1, mesh_sc%N_proc(direction)
+                nullify(Vp(ind,i1,i2)%pter)
+                nullify(Vm(ind,i1,i2)%pter)
+            end do
+        end do
+    end do
+#endif
+#ifndef BLOCKING_SEND
+    ! -- Mpi internal buffer for non blocking communication --
+    do proc_gap = send_gap(1,1), send_gap(1,2)
+        if (neighbors(direction,proc_gap) /= D_rank(direction)) then
+            call MPI_WAIT(s_request(proc_gap),rece_status,ierr)
+        end if
+    end do
+    deallocate(s_request)
+#endif
+    ! -- Deallocate dynamic array --
+    deallocate(V_buffer)
+
+end subroutine AC_interpol_lin
+
+
+!> Determine the set of processes which will send me information during the velocity interpolation and compute,
+!! for each of these processes, the range of wanted data.
+!!    @param[in]    direction       = current direction (1 = along X, 2 = along Y, 3 = along Z)
+!!    @param[in]    gp_s            = size of a group (ie number of lines it gathers along the two other directions)
+!!    @param[in]    ind_group       = coordinate of the current group of lines
+!!    @param[out]   send_gap        = gap between my coordinate and the processes of extremal coordinate to which I will send information
+!!    @param[in]    rece_gap        = gap between my coordinate and the processes of extremal coordinate which will send information to me
+!!    @param[in]    rece_gap_abs    = min (resp max) value of rece_gap(:,:,i) with i=1 (resp 2)
+!!    @param[out]   cartography     = cartography(proc_gap) contains the set of line indices in the block for which the
+!!                                    current process requires data from proc_gap and, for each of these lines, the range
+!!                                    of mesh points from which it requires the velocity values.
+!! @details
+!!    Works on a group of lines of size gs(1) x gs(2).
+!!    Obtain the list of processes which need a part of my local velocity field
+!!    to interpolate the velocity used in the RK2 scheme to advect their particles.
+!!    At the same time, it computes, for each process from which I need a part
+!!    of the velocity field, the range of mesh points where I want data and stores it
+!!    using some sparse matrix techniques (see the cartography described in the
+!!    algorithm documentation)
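+!!
+!!    Layout example (illustrative, with assumed values): for a 2 x 2 group
+!!    where only the line (i1=1,i2=2) requires mesh points 5 to 9 from a given
+!!    proc_gap, the final cartography(1:8,proc_gap) is (/5, 6, 0, 2, 1, 1, 5, 9/):
+!!    5 velocity values to receive, a cartography of size 6 before the mesh
+!!    ranges are appended, 0 interval entries for the first column and 2 for
+!!    the second, the i1-interval (1,1), and the mesh range (5,9) appended by
+!!    the calling routine.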
+subroutine AC_interpol_determine_communication(direction, ind_group, gs, send_gap,  &
+    & rece_gap, rece_gap_abs, cartography)
+! XXX Works only for periodic boundary conditions.
+
+    use cart_topology   ! info about mesh and mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    integer, intent(in)                                 :: direction
+    integer, dimension(2), intent(in)                   :: ind_group
+    integer, dimension(2), intent(in)                   :: gs
+    integer, dimension(gs(1), gs(2), 2), intent(in)     :: rece_gap
+    integer, dimension(2), intent(in)                   :: rece_gap_abs ! min (resp max) value of rece_gap(:,:,i) with i=1 (resp 2)
+    integer, dimension(2, 2), intent(out)               :: send_gap
+    integer, dimension(2+gs(2)*(2+3*gs(1)), &
+        & rece_gap_abs(1):rece_gap_abs(2)), intent(out) :: cartography
+    ! Others
+    integer                             :: proc_gap         ! gap between a process coordinate (along the current
+                                                            ! direction) in the mpi-topology and my coordinate
+    integer, dimension(gs(1), gs(2))    :: rece_gapP        ! gap between the coordinate of the previous process (in the current direction)
+                                                            ! and the processes of maximal coordinate which will receive information from it
+    integer, dimension(gs(1), gs(2))    :: rece_gapN        ! same as above but for the next process
+    integer                             :: send_request_gh  ! mpi request handle of a non-blocking send
+    integer                             :: send_request_gh2 ! mpi request handle of a non-blocking send
+    integer                             :: ierr             ! mpi error code
+    integer, dimension(2)               :: tag_table        ! some mpi message tags
+    logical, dimension(:,:), allocatable:: test_request     ! for mpi non blocking communication
+    integer, dimension(:,:), allocatable:: send_request     ! for mpi non blocking send
+    integer                             :: ind1, ind2       ! indices of the current line inside the group
+    integer,dimension(2)                :: rece_buffer      ! buffer for reception of rece_max
+    integer, dimension(:,:), allocatable:: first, last      ! storage of the processes for which I am the first (or the last) to require information
+    integer                             :: min_size         ! begin index in first and last to store indices along the first dimension of the group of lines
+    integer                             :: gp_size          ! group size
+    logical                             :: begin_interval   ! are we at the start of an interval ?
+    logical                             :: not_myself       ! is the target process myself ?
+    integer, dimension(MPI_STATUS_SIZE) :: statut
+
+    send_gap(1,1) = 3*mesh_sc%N(direction)
+    send_gap(1,2) = -3*mesh_sc%N(direction)
+    send_gap(2,:) = 0
+    gp_size = gs(1)*gs(2)
+
+    ! ===== Communicate with my neighbors -> obtain ghost information =====
+    ! Inform them about the processes from which I need information
+    tag_table = compute_tag(ind_group, tag_obtrec_ghost_NP, direction)
+    call mpi_ISsend(rece_gap(1,1,1), gp_size, MPI_INTEGER, neighbors(direction,-1), tag_table(1), &
+        & D_comm(direction), send_request_gh, ierr)
+    call mpi_ISsend(rece_gap(1,1,2), gp_size, MPI_INTEGER, neighbors(direction,1), tag_table(2), &
+        & D_comm(direction), send_request_gh2, ierr)
+    ! Receive the same message from my neighbors
+    call mpi_recv(rece_gapN(1,1), gp_size, MPI_INTEGER, neighbors(direction,1), tag_table(1), D_comm(direction), statut, ierr)
+    call mpi_recv(rece_gapP(1,1), gp_size, MPI_INTEGER, neighbors(direction,-1), tag_table(2), D_comm(direction), statut, ierr)
+
+    ! ===== Compute if I am first or last and determine the cartography =====
+    min_size = 2 + gs(2)
+    ! Initialize first and last to determine if I am the first or the last process (considering the current direction)
+        ! to require information from this process
+    allocate(first(2,rece_gap_abs(1):rece_gap_abs(2)))
+    first(2,:) = 0  ! number of lines for which I am the first
+    allocate(last(2,rece_gap_abs(1):rece_gap_abs(2)))
+    last(2,:) = 0   ! number of lines for which I am the last
+    ! Initialize cartography
+    cartography(1,:) = 0            ! number of velocity values to receive
+    cartography(2,:) = min_size     ! number of element to send when sending cartography
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+        first(1,proc_gap) = -proc_gap
+        last(1,proc_gap) = -proc_gap
+        not_myself = (neighbors(direction,proc_gap) /= D_rank(direction)) ! is the target process myself ?
+        do ind2 = 1, gs(2)
+            cartography(2+ind2,proc_gap) = 0    ! 2 x number of intervals of concerned lines in the column ind2
+            begin_interval = .true.
+            do ind1 = 1, gs(1)
+                ! Does proc_gap belong to [rece_gap(ind1,ind2,1);rece_gap(ind1,ind2,2)] ?
+                if((proc_gap>=rece_gap(ind1,ind2,1)).and.(proc_gap<=rece_gap(ind1,ind2,2))) then
+                    ! Compute if I am the first.
+                    if (proc_gap>rece_gapP(ind1,ind2)-1) then
+                        first(2,proc_gap) =  first(2,proc_gap)+1
+                    end if
+                    ! Compute if I am the last.
+                    if (proc_gap<rece_gapN(ind1,ind2)+1) then
+                        last(2,proc_gap) =  last(2,proc_gap)+1
+                    end if
+                    ! Update cartography // not needed if the target process is myself
+                    if (not_myself) then
+                        if (begin_interval) then
+                            cartography(2+ind2,proc_gap) =  cartography(2+ind2,proc_gap)+2
+                            cartography(cartography(2,proc_gap)+1,proc_gap) = ind1
+                            cartography(2,proc_gap) = cartography(2,proc_gap) + 2
+                            cartography(cartography(2,proc_gap),proc_gap) = ind1
+                            begin_interval = .false.
+                        else
+                            cartography(cartography(2,proc_gap),proc_gap) = ind1
+                        end if
+                    end if
+                else
+                    begin_interval = .true.
+                end if
+            end do
+        end do
+    end do
+
+    ! ===== Free Isend buffer from first communication =====
+    call MPI_WAIT(send_request_gh,statut,ierr)
+    call MPI_WAIT(send_request_gh2,statut,ierr)
+
+    ! ===== Send information about first and last  =====
+    tag_table = compute_tag(ind_group, tag_obtrec_NP, direction)
+    allocate(send_request(rece_gap_abs(1):rece_gap_abs(2),2))
+    allocate(test_request(rece_gap_abs(1):rece_gap_abs(2),2))
+    test_request = .false.
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+        ! I am the first ?
+        if (first(2,proc_gap)>0) then
+            if(neighbors(direction,proc_gap)/= D_rank(direction)) then
+                call mpi_ISsend(first(1,proc_gap), 2, MPI_INTEGER, neighbors(direction,proc_gap),&
+                        & tag_table(1), D_comm(direction), send_request(proc_gap,1), ierr)
+                test_request(proc_gap,1) = .true.
+            else
+                send_gap(1,1) = min(send_gap(1,1), -proc_gap)
+                send_gap(2,1) = send_gap(2,1) + first(2,proc_gap)
+            end if
+        end if
+        ! I am the last ?
+        if (last(2,proc_gap)>0) then
+            if(neighbors(direction,proc_gap)/= D_rank(direction)) then
+                call mpi_ISsend(last(1,proc_gap), 2, MPI_INTEGER, neighbors(direction,proc_gap),&
+                        &  tag_table(2), D_comm(direction), send_request(proc_gap,2), ierr)
+                test_request(proc_gap,2) = .true.
+            else
+                send_gap(1,2) = max(send_gap(1,2), -proc_gap)
+                send_gap(2,2) = send_gap(2,2) + last(2,proc_gap)
+            end if
+        end if
+    end do
+
+
+
+    ! ===== Receive information from the first and the last processes which need a part of my local velocity field =====
+    do while(send_gap(2,1) < gp_size)
+        call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
+        send_gap(1,1) = min(send_gap(1,1), rece_buffer(1))
+        send_gap(2,1) = send_gap(2,1) + rece_buffer(2)
+    end do
+    do while(send_gap(2,2) < gp_size)
+        call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
+        send_gap(1,2) = max(send_gap(1,2), rece_buffer(1))
+        send_gap(2,2) = send_gap(2,2) + rece_buffer(2)
+    end do
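+    ! Each received message carries (-proc_gap, number of lines): the two loops
+    ! above stop once all the gp_size lines of the group have been accounted
+    ! for, on both the "first" and the "last" side.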
+
+    ! ===== Free Isend buffer =====
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+        if (test_request(proc_gap,1)) call MPI_WAIT(send_request(proc_gap,1),statut,ierr)
+        if (test_request(proc_gap,2)) call MPI_WAIT(send_request(proc_gap,2),statut,ierr)
+    end do
+    deallocate(send_request)
+    deallocate(test_request)
+
+    ! ===== Deallocate array =====
+    deallocate(first)
+    deallocate(last)
+
+end subroutine AC_interpol_determine_communication
+
+
+!> Interpolate the velocity field used in a RK2 scheme for particle advection -
+!! version for directions with no domain subdivision and thus no required
+!! communications
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        gs          = size of a group (ie number of lines it gathers along the two other directions)
+!!    @param[in]        V_comp      = velocity to interpolate
+!!    @param[in,out]    p_V         = particle position in input and particle velocity (along the current direction) as output
+!! @details
+!!    A RK2 scheme is used to advect the particles: the middle point scheme. An
+!!    intermediary position "p_pos_bis(i) = p_pos(i) + V(i)*dt/2" is computed and then
+!!    the numerical velocity of each particle is computed as the interpolation of V at
+!!    this point. This field is used to advect the particles at second order in time:
+!!    p_pos(t+dt, i) = p_pos(i) + p_V(i).
+!!    Variant for cases with no required communication.
+subroutine AC_interpol_lin_no_com(direction, gs, V_comp, p_V)
+
+    ! This code involves a copy of p_V. It would be possible to use the 3D velocity field directly, but this would also limit memory accesses.
+
+    use cart_topology   ! info about mesh and mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    integer, intent(in)                             :: direction    ! current direction
+    integer, dimension(2),intent(in)                :: gs           ! group size
+    real(WP), dimension(:,:,:), intent(in)          :: V_comp
+    real(WP), dimension(:,:,:), intent(inout)       :: p_V
+    ! Others, local
+    integer                                             :: ind          ! indices
+    integer                                             :: i1, i2       ! indices in the lines group
+    integer                                             :: pos          ! index of the mesh point which precedes the particle position
+
+
+    ! ===== Compute the interpolated velocity =====
+    ! -- Compute the interpolation weight and update the velocity directly in p_V --
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            do ind = 1, mesh_sc%N(direction)
+
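+            ! Linear interpolation with periodic wrapping:
+            !   V(x) = V(pos) + (x - pos)*(V(pos+1) - V(pos)), with pos = floor(x),
+            ! where modulo(...,N)+1 maps indices back into [1, N].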
+            pos = floor(p_V(ind,i1,i2))
+            p_V(ind,i1,i2) = V_comp(modulo(pos-1,mesh_sc%N(direction))+1,i1,i2) + (p_V(ind,i1,i2)-pos)* &
+                & (V_comp(modulo(pos,mesh_sc%N(direction))+1,i1,i2)-V_comp(modulo(pos-1,mesh_sc%N(direction))+1,i1,i2))
+
+            end do ! loop on particle indice (ind)
+        end do ! loop on first coordinate (i1) of a line inside the block of line
+    end do ! loop on second coordinate (i2) of a line inside the block of line
+
+end subroutine AC_interpol_lin_no_com
+
+
+!> Interpolate the velocity field from the coarse grid at particle positions -
+!! version for a group of (more than one) lines
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        gs          = size of a group (ie number of lines it gathers along the two other directions)
+!!    @param[in]        ind_group   = indices of the current work item
+!!    @param[in]        id1         = first coordinate of the current work item related to the total local mesh
+!!    @param[in]        id2         = second coordinate of the current work item related to the total local mesh
+!!    @param[in]        V_coarse    = velocity to interpolate
+!!    @param[in,out]    p_V         = particle position in input and particle velocity (along the current direction) as output
+!! @details
+!!    A RK2 scheme is used to advect the particles: the middle point scheme. An
+!!    intermediary position "p_pos_bis(i) = p_pos(i) + V(i)*dt/2" is computed and then
+!!    the numerical velocity of each particle is computed as the interpolation of V at
+!!    this point. This field is used to advect the particles at second order in time:
+!!    p_pos(t+dt, i) = p_pos(i) + p_V(i).
+!!    The group line index is used to ensure the unicity of each mpi message tag.
+!!    The interpolation is done for a group of lines, allowing communications to
+!!    be mutualised. Considering a group of Na x Nb lines, communications performed
+!!    by this algorithm are around (Na x Nb) times bigger than with the algorithm
+!!    which works on a single line, but also around (Na x Nb) times less frequent.
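+!!
+!!    Interpolation kernel (sketch derived from the code below): for a particle
+!!    at position x, with pos = floor(x) - stencil_g, the interpolated velocity
+!!    reads
+!!        p_V = sum(weight(k) * V(pos+k-1), k = 1..stencil_size),
+!!    where the weights are given by get_weight for the chosen formula.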
+subroutine AC_interpol_plus(direction, gs, ind_group, id1, id2, V_coarse, p_V)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+    !use Interpolation_velo
+    use interpolation_velo, only : get_weight, stencil_g, stencil_d, stencil_size
+
+    implicit none
+
+    ! Input/Output
+    integer                   , intent(in)          :: direction    ! current direction
+    integer, dimension(2)     , intent(in)          :: gs           ! group size
+    integer, dimension(2)     , intent(in)          :: ind_group
+    integer                   , intent(in)          :: id1, id2
+    real(WP), dimension(:,:,:), intent(inout)       :: p_V
+    real(WP), dimension(:,:,:), intent(in)          :: V_coarse     ! velocity on coarse grid
+    ! Local
+    integer                                     :: idir1, idir2 ! = (id1, id2) - 1, as array indices start from 1.
+    real(WP), dimension(stencil_size)           :: weight       ! interpolation weight storage
+    real(WP), dimension(:), allocatable         :: V_buffer     ! velocity buffer for positions outside of the local subdomain
+    integer, dimension(:), allocatable          :: pos_in_buffer! buffer size
+    integer , dimension(gs(1), gs(2))           :: rece_ind_min ! minimal index of the mesh points where I need velocity values (for my local subdomain)
+    integer , dimension(gs(1), gs(2))           :: rece_ind_max ! maximal index of the mesh points where I need velocity values (for my local subdomain)
+    integer                                     :: ind, ind_com, V_ind ! indices
+    integer                                     :: i_limit, i, ind_gap
+    integer                                     :: i1, i2       ! indices in the lines group
+    integer                                     :: pos, pos_old ! indices of the mesh points which precede the particle position
+    integer                                     :: proc_gap, gap! distance between my (mpi) coordinate and the coordinate of the
+                                                                ! process associated to a given position
+    integer                                     :: proc_end     ! final index of the process associated to the current pos
+    logical, dimension(3)                       :: myself
+    integer, dimension(:), allocatable          :: send_carto   ! cartography of what I have to send
+    integer                                     :: ind_1Dtable  ! index of my current position inside a one-dimensional table
+    integer                                     :: ind_for_i1   ! where to read the first coordinate (i1) of the current line inside the cartography
+    real(WP), dimension(:), allocatable         :: send_buffer  ! to store what I have to send (in a contiguous way)
+    integer, dimension(gs(1),gs(2),2)           :: rece_gap     ! distance between me and the processes which send me information
+    integer, dimension(2 , 2)                   :: send_gap     ! distance between me and the processes to which I send information
+    integer, dimension(2)                       :: rece_gap_abs ! min (resp max) value of rece_gap(:,:,i) with i=1 (resp 2)
+    integer                                     :: com_size     ! size of message send/receive
+    integer, dimension(:), allocatable          :: size_com     ! size of message send/receive
+    integer                                     :: min_size     ! minimal size of cartography(:,proc_gap)
+    integer                                     :: max_size     ! maximal size of cartography(:,proc_gap)
+    integer                                     :: tag          ! mpi message tag
+    integer, dimension(:), allocatable          :: tag_proc     ! mpi message tag
+    integer                                     :: ierr         ! mpi error code
+    integer, dimension(:), allocatable          :: s_request_bis! mpi communication request (handle) of nonblocking send
+    integer, dimension(:), allocatable          :: rece_request ! mpi communication request (handle) of nonblocking receive
+    integer, dimension(MPI_STATUS_SIZE)         :: rece_status  ! mpi status (for mpi_wait)
+    integer, dimension(:,:), allocatable        :: cartography  ! cartography(proc_gap) contains the set of line indices in the block for which the
+                                                                ! current process requires data from proc_gap and, for each of these lines, the range
+                                                                ! of mesh points from which it requires the velocity values.
+
+    ! -- Initialisation --
+    idir1 = id1 - 1
+    idir2 = id2 - 1
+    ! Compute the range of the set of points where I need the velocity value
+    rece_ind_min = floor(p_V(1,:,:)) - stencil_g
+    rece_ind_max = floor(p_V(mesh_sc%N_proc(direction),:,:)) + stencil_d
+
+    ! ===== Exchange velocity field if needed =====
+    ! It uses non blocking messages to overlap computations with the communication process
+    ! -- What do I have to communicate ? --
+    rece_gap(:,:,1) = floor(real(rece_ind_min-1, WP)/mesh_V%N_proc(direction))
+    rece_gap(:,:,2) = floor(real(rece_ind_max-1, WP)/mesh_V%N_proc(direction))
+    rece_gap_abs(1) = minval(rece_gap(:,:,1))
+    rece_gap_abs(2) = maxval(rece_gap(:,:,2))
+    max_size = 2 + gs(2)*(2+3*gs(1))
+    allocate(cartography(max_size,rece_gap_abs(1):rece_gap_abs(2)))
+    call AC_interpol_determine_communication(direction, ind_group, gs, send_gap,  &
+    & rece_gap, rece_gap_abs, cartography)
+
+    ! -- Send messages about what I want --
+    allocate(s_request_bis(rece_gap_abs(1):rece_gap_abs(2)))
+    allocate(size_com(rece_gap_abs(1):rece_gap_abs(2)))
+    allocate(tag_proc(rece_gap_abs(1):rece_gap_abs(2)))
+    min_size = 2 + gs(2)
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+        if (neighbors(direction,proc_gap) /= D_rank(direction)) then
+            cartography(1,proc_gap) = 0
+            ! Use the cartography to know which lines are concerned
+            size_com(proc_gap) = cartography(2,proc_gap)
+            ! Range I want - store into the cartography
+            gap = proc_gap*mesh_V%N_proc(direction)
+            ! Position in cartography(:,proc_gap) of the current i1 index
+            ind_for_i1 = min_size
+            do i2 = 1, gs(2)
+                do ind = ind_for_i1+1, ind_for_i1 + cartography(2+i2,proc_gap), 2
+                    do i1 = cartography(ind,proc_gap), cartography(ind+1,proc_gap)
+                        ! Interval starts at:
+                        cartography(size_com(proc_gap)+1,proc_gap) = max(rece_ind_min(i1,i2), gap+1) ! Fortran => indices start from 1
+                        ! and ends at:
+                        cartography(size_com(proc_gap)+2,proc_gap) = min(rece_ind_max(i1,i2), gap+mesh_V%N_proc(direction))
+                        ! update number of elements to receive
+                        cartography(1,proc_gap) = cartography(1,proc_gap) &
+                                    & + cartography(size_com(proc_gap)+2,proc_gap) &
+                                    & - cartography(size_com(proc_gap)+1,proc_gap) + 1
+                        size_com(proc_gap) = size_com(proc_gap)+2
+                    end do
+                end do
+                ind_for_i1 = ind_for_i1 + cartography(2+i2,proc_gap)
+            end do
+            ! Tag = concatenation of (rank+1), ind_group(1), ind_group(2), direction and a unique id.
+            tag_proc(proc_gap) = compute_tag(ind_group, tag_velo_range, direction, proc_gap)
+            ! Send message
+#ifdef PART_DEBUG
+            if(size_com(proc_gap)>max_size) then
+                print*, 'rank = ', cart_rank, ' -- wrong size for the cartography to send'
+                print*, 'carto size = ', size_com(proc_gap), ' bigger than the theoretical size ', &
+                    & max_size, ' and carto = ', cartography(:,proc_gap)
+            end if
+#endif
+            call mpi_ISsend(cartography(1,proc_gap), size_com(proc_gap), MPI_INTEGER,   &
+                & neighbors(direction,proc_gap), tag_proc(proc_gap), D_comm(direction), &
+                & s_request_bis(proc_gap),ierr)
+        end if
+    end do
+
+
+    ! -- Non blocking reception of the velocity field --
+    ! Allocate the pos_in_buffer to compute V_buffer size and to be able to
+    ! allocate it.
+    allocate(pos_in_buffer(rece_gap_abs(1):rece_gap_abs(2)))
+    pos_in_buffer(rece_gap_abs(1)) = 1
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)-1
+        pos_in_buffer(proc_gap+1)= pos_in_buffer(proc_gap) + cartography(1,proc_gap)
+    end do
+    allocate(V_buffer(pos_in_buffer(rece_gap_abs(2)) &
+                & + cartography(1,rece_gap_abs(2))))
+    V_buffer = 0
+    allocate(rece_request(rece_gap_abs(1):rece_gap_abs(2)))
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+        if (neighbors(direction,proc_gap) /= D_rank(direction)) then
+            ! IIa - Compute reception tag
+            tag = compute_tag(ind_group, tag_velo_V, direction, -proc_gap)
+            ! IIb - Receive message
+            call mpi_Irecv(V_buffer(pos_in_buffer(proc_gap)), cartography(1,proc_gap), MPI_REAL_WP, &
+                    & neighbors(direction,proc_gap), tag, D_comm(direction), rece_request(proc_gap), ierr)
+        end if
+    end do
+
+    ! -- Send the velocity field to the processes which need it --
+    allocate(send_carto(max_size))
+    do proc_gap = send_gap(1,1), send_gap(1,2)
+        if (neighbors(direction,proc_gap) /= D_rank(direction)) then
+            ! I - Receive messages about what I have to send
+            ! Ia - Compute reception tag = concatenation of (rank+1), ind_group(1), ind_group(2), direction and a unique id.
+            tag = compute_tag(ind_group, tag_velo_range, direction, -proc_gap)
+            ! Ib - Receive the message
+            call mpi_recv(send_carto(1), max_size, MPI_INTEGER, neighbors(direction,proc_gap), &
+              & tag, D_comm(direction), rece_status, ierr)
+            ! II - Send it
+            ! IIa - Create send buffer
+            allocate(send_buffer(send_carto(1)))
+            gap = proc_gap*mesh_V%N_proc(direction)
+            com_size = 0
+            ind_1Dtable = send_carto(2)
+            ! Position in cartography(:,proc_gap) of the current i1 index
+            ind_for_i1 = min_size
+            do i2 = 1, gs(2)
+                do ind = ind_for_i1+1, ind_for_i1 + send_carto(2+i2), 2
+                    do i1 = send_carto(ind), send_carto(ind+1)
+                        do ind_com = send_carto(ind_1Dtable+1)+gap, send_carto(ind_1Dtable+2)+gap ! index inside the current line
+                            com_size = com_size + 1
+                            send_buffer(com_size) = V_coarse(ind_com, i1+idir1,i2+idir2)
+                        end do
+                        ind_1Dtable = ind_1Dtable + 2
+                    end do
+                end do
+                ind_for_i1 = ind_for_i1 + send_carto(2+i2)
+            end do
+            ! IIa_bis - check correctness
+#ifdef PART_DEBUG
+            if(com_size/=send_carto(1)) then
+                print*, 'rank = ', cart_rank, ' -- wrong size for the velocity field to send'
+                print*, 'computed size = ', com_size, ' does not match the received size ', &
+                    & send_carto(1), ' and carto = ', send_carto(:)
+            end if
+#endif
+            ! IIb - Compute send tag
+            tag = compute_tag(ind_group, tag_velo_V, direction, proc_gap)
+            ! IIc - Send message
+            call mpi_Send(send_buffer(1), com_size, MPI_REAL_WP,  &
+                    & neighbors(direction,proc_gap), tag, D_comm(direction),&
+                    & ierr)
+            deallocate(send_buffer)
+        end if
+    end do
+    deallocate(send_carto)
+
+    !-- Free some ISsend buffers and some arrays --
+! XXX Todo: prefer a call to MPI_WAITALL combined with initializing s_request_bis
+! to MPI_REQUEST_NULL, and remove both the loop AND the if.
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+        if (neighbors(direction,proc_gap) /= D_rank(direction)) then
+            call MPI_WAIT(s_request_bis(proc_gap),rece_status,ierr)
+        end if
+    end do
+    deallocate(s_request_bis)
+    deallocate(cartography) ! We do not need it anymore
+    deallocate(tag_proc)
+    deallocate(size_com)
+
+    ! Check that communications are done before starting the interpolation
+    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
+        if (neighbors(direction,proc_gap)/=D_rank(direction)) then
+            call mpi_wait(rece_request(proc_gap), rece_status, ierr)
+        end if
+    end do
+    deallocate(rece_request)
+
+
+  ! ===== Compute the interpolated velocity =====
+  pos_in_buffer = pos_in_buffer - 1
+  do i2 = 1, gs(2)
+    do i1 = 1, gs(1)
+      ind = 1
+      pos = floor(p_V(ind,i1,i2))-stencil_g
+      pos_old = pos-1
+      proc_gap = floor(dble(pos-1)/mesh_V%N_proc(direction))
+      myself(1) = (D_rank(direction) == neighbors(direction,proc_gap))
+      myself(2) = (D_rank(direction) == neighbors(direction,proc_gap+1))
+      ind_gap = proc_gap*mesh_V%N_proc(direction)
+      proc_end=(proc_gap+1)*mesh_V%N_proc(direction)
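+      ! The line is swept through four cases: stencil fully inside one
+      ! subdomain (case 1), stencil straddling two subdomains (case 2), change
+      ! of subdomain (case 3) and end of line (case 4); myself(:) indicates
+      ! whether the data comes from V_coarse (local) or V_buffer (received).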
+      do while (ind <= mesh_sc%N_proc(direction))
+        if (myself(1)) then
+          ! Case 1: if all stencil points belong to the local subdomain associated to the current MPI process:
+          do while((pos+stencil_size-1<=proc_end).and.(ind<=mesh_sc%N_proc(direction)))
+            call get_weight(p_V(ind,i1,i2)-(pos+stencil_g), weight)
+            V_ind = pos - ind_gap
+            p_V(ind,i1,i2) = sum(weight*V_coarse(V_ind:V_ind+stencil_size-1,i1+idir1,i2+idir2))
+            ! Update for next particle:
+            ind = ind + 1
+            pos_old = pos
+            pos = floor(p_V(ind,i1,i2))-stencil_g
+          end do ! case 1: while((pos+stencil_size<=proc_end).and.(V_ind<=mesh_sc%N_proc(direction)))
+          ! Case 2: else if the stencil intersects two subdomains
+          do while((pos<=proc_end).and.(ind <= mesh_sc%N_proc(direction)))
+            call get_weight(p_V(ind,i1,i2)-(pos+stencil_g), weight)
+            V_ind = pos - ind_gap
+            i_limit = mesh_V%N_proc(direction) - V_ind + 1
+            p_V(ind,i1, i2) = weight(1)*V_coarse(V_ind,i1+idir1,i2+idir2)
+            do i = 2, i_limit
+              p_V(ind,i1,i2) = p_V(ind,i1,i2) + weight(i)*V_coarse(i+V_ind-1,i1+idir1,i2+idir2)
+            end do
+            if(myself(2)) then
+              do i = i_limit+1, stencil_size
+                p_V(ind,i1,i2) = p_V(ind,i1,i2) + weight(i)*V_coarse(i-i_limit,i1+idir1,i2+idir2)
+              end do
+            else ! not(myself(2))
+              ! Start reading the buffer at (pos_in_buffer(proc_gap+1)+1) and do
+              ! not update pos_in_buffer until pos moves to the next subdomain.
+              do i = i_limit+1, stencil_size
+                p_V(ind,i1,i2) = p_V(ind,i1,i2) + weight(i)*V_buffer(pos_in_buffer(proc_gap+1)+i-i_limit)
+              end do
+            end if
+            ! If not (stencil_size < N_proc(direction)+1):
+            !   compute i_limit2 = min(stencil_size, proc_end + N_proc - pos)
+            !   stop the previous loop at "i_limit2"
+            !   add a loop from i_limit2+1 to stencil_size
+            !   in this loop, use proc_gap+2 and myself(3)
+            ! And so on ...
+            ! Update for next particle:
+            ind = ind + 1
+            pos_old = pos
+            pos = floor(p_V(ind,i1,i2))-stencil_g
+          end do ! case 2: ((pos<=proc_end).and.(ind <= mesh_sc%N_proc(direction)))
+        else ! not(myself(1))
+          ! Case 1: if all stencil points belong to the subdomain of the same remote process (data read from V_buffer):
+          do while((pos+stencil_size-1<=proc_end).and.(ind<=mesh_sc%N_proc(direction)))
+            call get_weight(p_V(ind,i1,i2)-(pos+stencil_g), weight)
+            pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + pos - pos_old
+            p_V(ind,i1,i2) = sum(weight*V_buffer(pos_in_buffer(proc_gap):pos_in_buffer(proc_gap)+stencil_size-1))
+            ! Update for next particle:
+            ind = ind + 1
+            pos_old = pos
+            pos = floor(p_V(ind,i1,i2))-stencil_g
+          end do ! case 1: while((pos+stencil_size<=proc_end).and.(V_ind<=mesh_sc%N_proc(direction)))
+          ! Case 2: else if the stencil intersects two subdomains
+          do while((pos<=proc_end).and.(ind <= mesh_sc%N_proc(direction)))
+            call get_weight(p_V(ind,i1,i2)-(pos+stencil_g), weight)
+            i_limit = mesh_V%N_proc(direction) - (pos-ind_gap) + 1
+            pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + pos - pos_old
+            p_V(ind,i1,i2) = weight(1)*V_buffer(pos_in_buffer(proc_gap))
+            do i = 2, i_limit
+              p_V(ind,i1,i2) = p_V(ind,i1,i2) + weight(i)*V_buffer(pos_in_buffer(proc_gap)+i-1)
+            end do
+            if(myself(2)) then
+              do i = i_limit+1, stencil_size
+                p_V(ind,i1,i2) = p_V(ind,i1,i2) + weight(i)*V_coarse(i-i_limit,i1+idir1,i2+idir2)
+              end do
+            else ! not(myself(2))
+              do i = i_limit+1, stencil_size
+                p_V(ind,i1,i2) = p_V(ind,i1,i2) + weight(i)*V_buffer(pos_in_buffer(proc_gap+1)+i-i_limit)
+              end do
+            end if
+            ! Update for next particle:
+            ind = ind + 1
+            pos_old = pos
+            pos = floor(p_V(ind,i1,i2))-stencil_g
+          end do ! case 2: ((pos<=proc_end).and.(ind <= mesh_sc%N_proc(direction)))
+        end if
+        ! Cases 3 and 4 can be gathered, whether myself is true or not.
+        ! Case 3: pos belongs to the next subdomain
+        if(ind<=mesh_sc%N_proc(direction)) then ! Change of process
+          proc_gap = proc_gap+1
+          myself(1) = myself(2)
+          ind_gap = proc_end
+          proc_end = proc_end + mesh_V%N_proc(direction)
+          myself(2) = (D_rank(direction) ==neighbors(direction, proc_gap+1))
+        else
+        ! Case 4: End of the line. Update pos_in_buffer for the next line.
+          ! pos_in_buffer must be updated to the maximal index already used.
+          if (pos_old+stencil_size<=proc_end) then
+            if (.not.(myself(1))) pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + stencil_size - 1
+          else
+            i_limit = mesh_V%N_proc(direction) - (pos_old-ind_gap) + 1
+            !i_limit = mesh_V%N_proc(direction) - (pos-ind_gap)
+            if (.not.(myself(1))) pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + i_limit - 1
+            if (.not.(myself(2))) pos_in_buffer(proc_gap+1) = pos_in_buffer(proc_gap+1) + (stencil_size - i_limit)
+          end if
+        end if ! if case 3
+      end do  ! while (ind<mesh_sc%N_proc)
+    end do    ! i1 = 1, gs(1)
+  end do      ! i2 = 1, gs(2)
+
+  deallocate(pos_in_buffer)   ! We do not need it anymore
+
+  ! ===== Free memory =====
+  ! -- Deallocate dynamic array --
+  deallocate(V_buffer)
+
+end subroutine AC_interpol_plus
+
+
+!> Interpolate the velocity field used in a RK2 scheme for particle advection -
+!! version for directions with no domain subdivision and thus no required
+!! communications. Works with any interpolation formula.
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        gs          = size of a group (ie number of lines it gathers along the two other directions)
+!!    @param[in]        V_coarse    = velocity to interpolate
+!!    @param[in,out]    p_V         = particle position in input and particle velocity (along the current direction) as output
+!! @details
+!!    A RK2 scheme is used to advect the particles: the middle point scheme. An
+!!    intermediary position "p_pos_bis(i) = p_pos(i) + V(i)*dt/2" is computed and then
+!!    the numerical velocity of each particle is computed as the interpolation of V at
+!!    this point. This field is used to advect the particles at second order in time:
+!!    p_pos(t+dt, i) = p_pos(i) + p_V(i).
+!!    Variant for cases with no required communication.
+subroutine AC_interpol_plus_no_com(direction, gs, id1, id2, V_coarse, p_V)
+
+    ! This code involves a copy of p_V. It would be possible to use the 3D velocity field directly, but this would also limit memory accesses.
+
+    use cart_topology   ! info about mesh and mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+    use interpolation_velo, only : get_weight, stencil_g, stencil_size
+
+    ! Input/Output
+    integer, intent(in)                         :: direction    ! current direction
+    integer, dimension(2),intent(in)            :: gs           ! group size
+    integer                   , intent(in)      :: id1, id2
+    real(WP), dimension(:,:,:), intent(in)      :: V_coarse
+    real(WP), dimension(:,:,:), intent(inout)   :: p_V
+    ! Others, local
+    integer                                     :: idir1, idir2 ! = (id1, id2) - 1, as array indices start from 1.
+    real(WP), dimension(stencil_size)           :: weight       ! interpolation weight storage
+    integer                                     :: ind, i_st    ! indices
+    integer                                     :: i1, i2       ! indices in the lines group
+    integer                                     :: pos          ! index of the mesh point which precedes the particle position
+
+    idir1 = id1 - 1
+    idir2 = id2 - 1
+
+    ! ===== Compute the interpolated velocity =====
+    ! -- Compute the interpolation weight and update the velocity directly in p_V --
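+    ! Worked example (illustrative, assuming stencil_size = 4 and stencil_g = 1,
+    ! i.e. one stencil point on the left of the cell containing the particle):
+    ! for p_V(ind,i1,i2) = 7.3, pos = floor(7.3) = 7 and the weights are
+    ! evaluated at 0.3; after pos = 7 - stencil_g - 1 = 5 the stencil points
+    ! are modulo(5+i_st-1, N)+1 = 6, 7, 8, 9 for i_st = 1..4, where the modulo
+    ! wraps the stencil around the periodic domain whenever pos+i_st-1 leaves
+    ! [0, N-1] (N = mesh_V%N(direction)).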
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            do ind = 1, mesh_sc%N(direction)
+              pos = floor(p_V(ind,i1,i2))
+              call get_weight(p_V(ind,i1,i2)-pos, weight)
+              pos = pos - stencil_g - 1
+              p_V(ind,i1,i2) = weight(1)*V_coarse(modulo(pos,mesh_V%N(direction))+1,i1+idir1,i2+idir2)
+              do i_st = 2, stencil_size
+                p_V(ind,i1,i2) = p_V(ind,i1,i2) + &
+                    & weight(i_st)*V_coarse(modulo(pos+i_st-1,mesh_V%N(direction))+1,i1+idir1,i2+idir2)
+              end do ! loop on stencil points.
+            end do ! loop on particle index (ind)
+        end do ! loop on first coordinate (i1) of a line inside the group of lines
+    end do ! loop on second coordinate (i2) of a line inside the group of lines
+
+end subroutine AC_interpol_plus_no_com
+
+
+end module advec_common_interpol
+!> @}
diff --git a/HySoP/src/scalesReduced/particles/advec_common_remesh.F90 b/HySoP/src/scalesReduced/particles/advec_common_remesh.F90
new file mode 100644
index 0000000000000000000000000000000000000000..3310dac08141b04856a3cc90f9a6cb98808bfde8
--- /dev/null
+++ b/HySoP/src/scalesReduced/particles/advec_common_remesh.F90
@@ -0,0 +1,1544 @@
+!USEFORTEST advec
+!> @addtogroup part
+!! @{
+!------------------------------------------------------------------------------
+!
+! MODULE: advec_common_remesh
+!
+!
+! DESCRIPTION:
+!> The module ``advec_common_remesh'' gathers the functions and subroutines used to
+!! advect a scalar which are not specific to a direction
+!! @details
+!! This module gathers functions and routines used to advect a scalar which are not
+!! specific to a direction. This is a parallel implementation using MPI and
+!! the Cartesian topology it provides. It also contains the variables common to
+!! the solver along each direction and other generic variables used for the
+!! advection based on the particle method.
+!!
+!! Except for testing purposes, this module is not supposed to be used by the
+!! main code but only by the other advection modules. More precisely, a final user
+!! must only use the generic "advec" module, which contains all the interfaces to
+!! solve the advection equation with the particle method and to choose the
+!! remeshing formula, the dimensional splitting and everything else.
+!!
+!! The module "test_advec" can be used in order to validate the procedures
+!! embedded in this module.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advec_common_remesh
+
+    use precision_tools
+    use advec_abstract_proc
+    use mpi, only: MPI_REQUEST_NULL, MPI_STATUS_SIZE, MPI_INTEGER, MPI_ANY_SOURCE
+    implicit none
+
+
+    ! Information about the particles and their blocks
+    public
+
+
+    ! ===== Public procedures =====
+    !----- Init remeshing context -----
+    public  :: AC_setup_init
+    public  :: AC_remesh_setup_alongX
+    public  :: AC_remesh_setup_alongY
+    public  :: AC_remesh_setup_alongZ
+    !----- To remesh particles -----
+    public                        :: AC_remesh_lambda_group
+    public                        :: AC_remesh_Mprime_group
+    !----- Tools to  remesh particles -----
+    public                        :: AC_remesh_range
+    public                        :: AC_remesh_determine_communication
+    public                        :: AC_remesh_cartography
+
+    ! ===== Private procedures =====
+    !----- Prepare and perform communication required during remeshing -----
+    private :: AC_remesh_init
+    private :: AC_remesh_finalize
+
+    ! ===== Public variables =====
+
+    ! ===== Private variables =====
+    !> Pointer to the subroutine which remeshes particles to a buffer - for formulae of the lambda family (with tag/type).
+    procedure(remesh_in_buffer_type), pointer, private      :: remesh_in_buffer_lambda_pt => null()
+    !> Pointer to the subroutine which remeshes particles to a buffer - for limited formulae of the lambda family (with tag/type).
+    procedure(remesh_in_buffer_limit), pointer, private     :: remesh_in_buffer_limit_lambda_pt => null()
+    !> Pointer to the subroutine which remeshes particles to a buffer - for formulae of the M' family (without tag/type).
+    procedure(remesh_in_buffer_notype), pointer, private    :: remesh_in_buffer_Mprime_pt => null()
+    !> Pointer to the subroutine which redistributes a buffer (containing remeshed
+    !! particles) inside the original scalar field.
+    procedure(remesh_buffer_to_scalar), pointer, private    :: remesh_buffer_to_scalar_pt => null()
+    !> Pointer to the subroutine which computes the scalar slope along the current
+    !! direction and then computes the limitator function (divided by 8)
+    procedure(advec_limitator_group), pointer, private      :: advec_limitator            => null()
+
+
+contains
+
+! ===== Public procedure =====
+
+! ================================================================================ !
+! =============     To deal with remeshing setup and generecity      ============= !
+! ================================================================================ !
+
+!> Init remesh_line_pt for the right remeshing formula
+subroutine AC_setup_init()
+
+    use advec_remeshing_lambda
+    use advec_remeshing_Mprime
+
+    call AC_remesh_init_lambda()
+    call AC_remesh_init_Mprime()
+
+end subroutine AC_setup_init
+
+!> Setup remesh_in_buffer and remesh_in_buffer_to_scalar for remeshing along X
+subroutine AC_remesh_setup_alongX()
+    use advecX
+
+    remesh_in_buffer_lambda_pt      => advecX_remesh_in_buffer_lambda
+    remesh_in_buffer_limit_lambda_pt=> advecX_remesh_in_buffer_limit_lambda
+    remesh_in_buffer_Mprime_pt      => advecX_remesh_in_buffer_Mprime
+
+    remesh_buffer_to_scalar_pt      => advecX_remesh_buffer_to_scalar
+
+    advec_limitator                 => advecX_limitator_group
+
+end subroutine AC_remesh_setup_alongX
+
+!> Setup remesh_in_buffer and remesh_in_buffer_to_scalar for remeshing along Y
+subroutine AC_remesh_setup_alongY()
+    use advecY
+
+    remesh_in_buffer_lambda_pt      => advecY_remesh_in_buffer_lambda
+    remesh_in_buffer_limit_lambda_pt=> advecY_remesh_in_buffer_limit_lambda
+    remesh_in_buffer_Mprime_pt      => advecY_remesh_in_buffer_Mprime
+    remesh_buffer_to_scalar_pt      => advecY_remesh_buffer_to_scalar
+
+    advec_limitator                 => advecY_limitator_group
+
+end subroutine AC_remesh_setup_alongY
+
+!> Setup remesh_in_buffer and remesh_in_buffer_to_scalar for remeshing along Z
+subroutine AC_remesh_setup_alongZ()
+    use advecZ
+
+    remesh_in_buffer_lambda_pt      => advecZ_remesh_in_buffer_lambda
+    remesh_in_buffer_limit_lambda_pt=> advecZ_remesh_in_buffer_limit_lambda
+    remesh_in_buffer_Mprime_pt      => advecZ_remesh_in_buffer_Mprime
+    remesh_buffer_to_scalar_pt      => advecZ_remesh_buffer_to_scalar
+
+    advec_limitator                 => advecZ_limitator_group
+
+end subroutine AC_remesh_setup_alongZ
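+
+! Typical call sequence (illustrative sketch inferred from this module only):
+!     call AC_setup_init()            ! bind the lambda and M' remeshing formulae
+!     call AC_remesh_setup_alongX()   ! select the X-specific procedures
+!     ! ... remesh the groups of lines along X with AC_remesh_lambda_group
+!     ! or AC_remesh_Mprime_group, then repeat with the Y and Z setups.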
+
+
+! ==============================================================================================
+! ====================     Remesh all the particles of a group of lines     ====================
+! ==============================================================================================
+
+
+!> remeshing with an order 2 or 4 lambda method, corrected to allow large CFL numbers - group version
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        gs          = size of groups (along X direction)
+!!    @param[in]        p_pos_adim  = adimensionned particles position
+!!    @param[in]        p_V         = particles velocity (needed for tag and type)
+!!    @param[in]        j,k         = indices of the current line (x-coordinate and z-coordinate)
+!!    @param[in,out]    scal        = scalar field to advect
+!!    @param[in]        dt          = time step (needed for tag and type)
+subroutine AC_remesh_lambda_group(direction, ind_group, gs, p_pos_adim, p_V, j, k, scal, dt)
+
+    use advec_variables         ! contains info about solver parameters and others.
+    use cart_topology     ! Description of mesh and of mpi topology
+    use advec_correction        ! To compute type and tag
+
+    ! Input/Output
+    integer, intent(in)                         :: direction
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: j, k
+    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! adimensionned particles position
+    real(WP), dimension(:,:,:), intent(in)      :: p_V          ! particles velocity
+    real(WP), dimension(:,:,:), intent(inout)   :: scal
+    real(WP), intent(in)                        :: dt
+    ! Other local variables
+    ! To type and tag particles
+    logical, dimension(bl_nb(direction)+1,gs(1),gs(2))  :: bl_type      ! is the particle block a center block or a left one ?
+    logical, dimension(bl_nb(direction),gs(1),gs(2))    :: bl_tag       ! flags tagged particles
+    ! Others
+    integer, dimension(gs(1), gs(2))        :: send_group_min     ! minimal indice of mesh involved in remeshing particles
+    integer, dimension(gs(1), gs(2))        :: send_group_max     ! maximal indice of mesh involved in remeshing particles
+    integer, dimension(gs(1),gs(2),2)       :: send_gap     ! distance between me and the processes to which I send information
+    integer, dimension(2)                   :: send_gap_abs ! min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+    integer, dimension(2 , 2)               :: rece_gap     ! distance between me and the processes which send me information
+    integer, dimension(:,:), allocatable    :: cartography  ! cartography(proc_gap) contains the set of the line indices in the block to which the
+                                                            ! current process will send data during remeshing and, for each of these lines, the range
+                                                            ! of mesh points from which it requires the velocity values.
+    ! Variables used to manage mpi communications
+    integer                                 :: max_size     ! maximal size of cartography(:,proc_gap)
+
+    ! ===== Pre-Remeshing: Determine block types and tag particles =====
+    call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
+
+    ! ===== Compute range of remeshing data =====
+    call AC_remesh_range(bl_type, p_pos_adim, direction, send_group_min, send_group_max, send_gap, send_gap_abs)
+
+    ! ===== Determine the needed communication: who will communicate with whom? (ie compute senders and receivers) =====
+    ! -- Allocation --
+    max_size = 2 + gs(2)*(2+3*gs(1))
+    allocate(cartography(max_size,send_gap_abs(1):send_gap_abs(2)))
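+    ! (Illustrative sizing, inferred from how the cartography is filled below:
+    ! 2 header entries, then per column of the group a couple of bookkeeping
+    ! entries plus at most 3 entries per line of the column; e.g. for
+    ! gs = (/5, 2/) this gives max_size = 2 + 2*(2 + 3*5) = 36 integers per
+    ! target process.)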
+    ! -- Determine which processes communicate together --
+    call AC_remesh_determine_communication(direction, gs, ind_group, send_group_min, send_group_max, &
+        & rece_gap, send_gap, send_gap_abs, cartography)
+
+    ! ===== Proceed to remeshing via a local buffer =====
+    call AC_remesh_via_buffer_lambda(direction, ind_group, gs, p_pos_adim, j, k,&
+        & scal, send_group_min, send_group_max, send_gap_abs, rece_gap,         &
+        & cartography, bl_type, bl_tag)
+
+    ! -- Free all communication buffer and data --
+    deallocate(cartography)
+
+end subroutine AC_remesh_lambda_group
+
+
+!> remeshing with an order 2 limited lambda method, corrected to allow large CFL numbers - group version
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        gs          = size of groups (along X direction)
+!!    @param[in]        p_pos_adim  = adimensionned particles position
+!!    @param[in]        p_V         = particles velocity (needed for tag and type)
+!!    @param[in]        j,k         = indices of the current line (x-coordinate and z-coordinate)
+!!    @param[in,out]    scal        = scalar field to advect
+!!    @param[in]        dt          = time step (needed for tag and type)
+subroutine AC_remesh_limit_lambda_group(direction, ind_group, gs, p_pos_adim, p_V, j, k, scal, dt)
+
+    use advec_variables         ! contains info about solver parameters and others.
+    use cart_topology     ! Description of mesh and of mpi topology
+    use advec_correction        ! To compute type and tag
+
+    ! Input/Output
+    integer, intent(in)                         :: direction
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: j, k
+    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! adimensionned particles position
+    real(WP), dimension(:,:,:), intent(in)      :: p_V          ! particles velocity
+    real(WP), dimension(:,:,:), intent(inout)   :: scal
+    real(WP), intent(in)                        :: dt
+    ! Other local variables
+    ! To type and tag particles
+    logical, dimension(bl_nb(direction)+1,gs(1),gs(2))  :: bl_type      ! is the particle block a center block or a left one ?
+    logical, dimension(bl_nb(direction),gs(1),gs(2))    :: bl_tag       ! flags tagged particles
+    real(WP), dimension(mesh_sc%N_proc(direction)+1,gs(1),gs(2)):: limit        ! limitator function (divided by 8.)
+    ! Others
+    integer, dimension(gs(1), gs(2))        :: send_group_min     ! minimal indice of mesh involved in remeshing particles
+    integer, dimension(gs(1), gs(2))        :: send_group_max     ! maximal indice of mesh involved in remeshing particles
+    integer, dimension(gs(1),gs(2),2)       :: send_gap     ! distance between me and the processes to which I send information
+    integer, dimension(2)                   :: send_gap_abs ! min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+    integer, dimension(2 , 2)               :: rece_gap     ! distance between me and the processes which send me information
+    integer, dimension(:,:), allocatable    :: cartography  ! cartography(proc_gap) contains the set of the line indices in the block to which the
+                                                            ! current process will send data during remeshing and, for each of these lines, the range
+                                                            ! of mesh points from which it requires the velocity values.
+    ! Variables used to manage mpi communications
+    integer                                 :: max_size     ! maximal size of cartography(:,proc_gap)
+
+    ! ===== Pre-Remeshing I: Determine block types and tag particles =====
+    call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
+
+    ! ===== Compute range of remeshing data =====
+    call AC_remesh_range(bl_type, p_pos_adim, direction, send_group_min, send_group_max, send_gap, send_gap_abs)
+
+    ! ===== Determine the needed communication: who will communicate with whom? (ie compute senders and receivers) =====
+    ! -- Allocation --
+    max_size = 2 + gs(2)*(2+3*gs(1))
+    allocate(cartography(max_size,send_gap_abs(1):send_gap_abs(2)))
+    ! -- Determine which processes communicate together --
+    call AC_remesh_determine_communication(direction, gs, ind_group, send_group_min, send_group_max, &
+        & rece_gap, send_gap, send_gap_abs, cartography)
+
+    ! ===== Pre-Remeshing II: Compute the limitator function =====
+    ! Actually, this subroutine computes [limitator/8], as it is this fraction
+    ! which always appears in the remeshing polynomials.
+    call advec_limitator(gs, ind_group, j, k, p_pos_adim, scal, limit)
+
+    ! ===== Proceed to remeshing via a local buffer =====
+    call AC_remesh_via_buffer_limit_lambda(direction, ind_group, gs, p_pos_adim,&
+        & j, k, scal, send_group_min, send_group_max, send_gap_abs, rece_gap,   &
+        & cartography, bl_type, bl_tag, limit)
+
+    ! -- Free all communication buffer and data --
+    deallocate(cartography)
+
+end subroutine AC_remesh_limit_lambda_group
+
+
+!> remeshing with a M'6 or M'8 remeshing formula - group version
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        gs          = size of groups (along X direction)
+!!    @param[in]        p_pos_adim  = adimensionned particles position
+!!    @param[in]        p_V         = particles velocity (needed for tag and type)
+!!    @param[in]        j,k         = indices of the current line (x-coordinate and z-coordinate)
+!!    @param[in,out]    scal        = scalar field to advect
+!!    @param[in]        dt          = time step (needed for tag and type)
+subroutine AC_remesh_Mprime_group(direction, ind_group, gs, p_pos_adim, p_V, j, k, scal, dt)
+
+    use advec_variables         ! contains info about solver parameters and others.
+    use cart_topology     ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    integer, intent(in)                         :: direction
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: j, k
+    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! adimensionned particles position
+    real(WP), dimension(:,:,:), intent(in)      :: p_V          ! particles velocity
+    real(WP), dimension(:,:,:), intent(inout)   :: scal
+    real(WP), intent(in)                        :: dt
+    ! Other local variables
+    integer, dimension(gs(1), gs(2))        :: send_group_min     ! minimal indice of mesh involved in remeshing particles
+    integer, dimension(gs(1), gs(2))        :: send_group_max     ! maximal indice of mesh involved in remeshing particles
+    integer, dimension(gs(1),gs(2),2)       :: send_gap     ! distance between me and the processes to which I send information
+    integer, dimension(2)                   :: send_gap_abs ! min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+    integer, dimension(2 , 2)               :: rece_gap     ! distance between me and the processes which send me information
+    integer, dimension(:,:), allocatable    :: cartography  ! cartography(proc_gap) contains the set of the line indices in the block to which the
+                                                            ! current process will send data during remeshing and, for each of these lines, the range
+                                                            ! of mesh points from which it requires the velocity values.
+    ! Variables used to manage mpi communications
+    integer                                 :: max_size     ! maximal size of cartography(:,proc_gap)
+
+    ! ===== Compute range of remeshing data =====
+    call AC_remesh_range_notype(p_pos_adim, direction, send_group_min, send_group_max, send_gap, send_gap_abs)
+
+    ! ===== Determine the needed communication: who will communicate with whom? (ie compute senders and receivers) =====
+    ! -- Allocation --
+    max_size = 2 + gs(2)*(2+3*gs(1))
+    allocate(cartography(max_size,send_gap_abs(1):send_gap_abs(2)))
+    ! -- Determine which processes communicate together --
+    call AC_remesh_determine_communication_com(direction, gs, ind_group, &
+        & rece_gap, send_gap, send_gap_abs, cartography)
+
+    ! ===== Proceed to remeshing via a local buffer =====
+    call AC_remesh_via_buffer_Mprime(direction, ind_group, gs, p_pos_adim,  &
+        &  j, k, scal, send_group_min, send_group_max, send_gap_abs,        &
+        &  rece_gap, cartography)
+
+    ! -- Free all communication buffer and data --
+    deallocate(cartography)
+
+end subroutine AC_remesh_Mprime_group
+
+
+! ===================================================================================================
+! ===== Tools to remesh particles: variant of remeshing via buffer for each family of remeshing =====
+! ===================================================================================================
+
+
+!> Use the input information to update the scalar field by creating particle
+!! weights (from the scalar values), setting the scalar to 0 and redistributing
+!! the particles inside it - variant for the corrected lambda remeshing formulae.
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        gs          = size of groups (along X direction)
+!!    @param[in]        j,k         = indices of the current line (x-coordinate and z-coordinate)
+!!    @param[in]        p_pos_adim  = adimensionned  particles position
+!!    @param[in]        bl_type     = table of blocks type (center or left)
+!!    @param[in]        bl_tag      = informs about tagged particles (bl_tag(ind_bl)=1 if the end of the bl_ind-th block
+!!                                    and the beginning of the following one is tagged)
+!!    @param[in,out]    scal        = scalar field to advect
+!!    @param[in]        send_min    = minimal indice of mesh involved in remeshing particles
+!!    @param[in]        send_max    = maximal indice of mesh involved in remeshing particles
+!!    @param[in]        send_gap_abs= send_gap_abs(i) is the min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+!!    @param[in]        rece_gap    = coordinate range of processes which will send me information during the remeshing.
+!!    @param[in,out]    cartography = cartography(proc_gap) contains the set of the line indices in the block for which the
+!!                                    current process requires data from proc_gap and, for each of these lines, the range
+!!                                    of mesh points from which it requires the velocity values.
+!! @details
+!!    This procedure manages all the needed communication. To minimize communications,
+!! particles are remeshed inside a local buffer which is afterwards sent to the
+!! process which owns the right sub-domain, depending on the particle
+!! position. No communication is needed in order to remesh inside the
+!! buffer. To avoid a recopy when creating the particle weights (which will be weight
+!! = scalar), the scalar is directly redistributed inside the local buffer.
+!! This proceeds in two steps:
+!!    a - Remesh the particles: redistribute the scalar field inside a local buffer and
+!!        set scalar = 0.
+!!    b - Send the local buffer to its target process and update the scalar field,
+!!        ie scalar = scalar + received buffer.
+!! "remesh_in_buffer_pt" does part "a" and "remesh_buffer_to_scalar" part
+!! "b", except for the communication. The current subroutine manages all the
+!! communications (and the other bookkeeping needed to ensure correctness).
+subroutine AC_remesh_via_buffer_lambda(direction, ind_group, gs, p_pos_adim,   &
+        & j, k, scal, send_min, send_max, send_gap_abs, rece_gap, cartography, &
+        & bl_type, bl_tag)
+
+    use advec_variables         ! contains info about solver parameters and others.
+    use advec_abstract_proc     ! contain some useful procedure pointers.
+    use advecX                  ! procedure specific to advection alongX
+    use advecY                  ! procedure specific to advection alongY
+    use advecZ                  ! procedure specific to advection alongZ
+    use cart_topology           ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    integer, intent(in)                         :: direction
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: j, k
+    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! adimensionned particles position
+    logical,dimension(:,:,:),intent(in)         :: bl_type      ! is the particle block a center block or a left one ?
+    logical,dimension(:,:,:),intent(in)         :: bl_tag       ! flags tagged particles
+    real(WP),dimension(:,:,:),intent(inout)     :: scal
+    integer, dimension(:,:), intent(in)         :: send_min     ! minimal indice of mesh involved in remeshing particles
+    integer, dimension(:,:), intent(in)         :: send_max     ! maximal indice of mesh involved in remeshing particles
+    integer, dimension(2), intent(in)           :: send_gap_abs ! min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+    integer, dimension(2, 2), intent(in)        :: rece_gap     ! distance between me and the processes which send me information
+    integer, dimension(:,:), intent(inout)      :: cartography  ! cartography(proc_gap) contains the set of the line indices in the block to which the
+                                                            ! current process will send data during remeshing and, for each of these lines, the range
+                                                            ! of mesh points from which it requires the velocity values.
+    ! Other local variables
+    ! Others
+    integer, dimension(:,:), allocatable    :: rece_carto   ! same as above but for what I receive
+    integer                                 :: min_size     ! minimal size of cartography(:,proc_gap)
+    integer                                 :: max_size     ! maximal size of cartography(:,proc_gap)
+    ! Variables used to remesh particles in a buffer
+    real(WP),dimension(:),allocatable,target:: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain,
+                                                            ! sorted by receivers and not by coordinate.
+    integer, dimension(:), allocatable      :: pos_in_buffer! first (1D-)indice of each process' sub-buffer inside send_buffer
+    ! Variables used to manage mpi communications
+    integer, dimension(:), allocatable      :: s_request_ran! mpi communication request (handle) of nonblocking send
+    integer, dimension(:), allocatable      :: r_request_ran! mpi communication request (handle) of nonblocking receive
+    integer, dimension(:,:), allocatable    :: r_status     ! mpi communication status of nonblocking receive
+    integer, dimension(:,:), allocatable    :: s_status     ! mpi communication status of nonblocking send
+    integer                                 :: ierr         ! mpi error code
+    integer                                 :: nb_r, nb_s   ! number of receptions/sends
+
+
+    ! ===== Allocation =====
+    ! -- allocate request about cartography (non-blocking) reception --
+    nb_r = rece_gap(1,2) - rece_gap(1,1) + 1
+    allocate(r_request_ran(1:nb_r))
+    r_request_ran = MPI_REQUEST_NULL
+    ! -- allocate cartography about what I receive --
+    max_size = 2 + gs(2)*(2+3*gs(1))
+    allocate(rece_carto(max_size,rece_gap(1,1):rece_gap(1,2)))
+    ! -- allocate request about cartography (non-blocking) send --
+    nb_s = send_gap_abs(2) - send_gap_abs(1) + 1
+    allocate(s_request_ran(1:nb_s))
+    ! -- To manage buffer --
+    ! Position of the sub-buffers associated with the different mpi processes
+    allocate(pos_in_buffer(0:nb_s))
+
+    ! ===== Init the remeshing process: pre-process  =====
+    ! Perform a cartography of the mesh points where particles will be remeshed,
+    ! create a 1D buffer where the remeshing will be performed and create the
+    ! tools to manage it.
+    call AC_remesh_init(direction, ind_group, gs, send_min, send_max, &
+        & send_gap_abs, rece_gap, nb_s, cartography, rece_carto,      &
+        & pos_in_buffer, min_size, max_size, s_request_ran, r_request_ran)
+
+
+    ! ===== Initialize the general buffer =====
+    allocate(send_buffer(pos_in_buffer(nb_s) &
+                & + cartography(1,nb_s)-1))
+    send_buffer = 0.0
+
+    ! ===== Remeshing into the buffer by using pointer array =====
+    call remesh_in_buffer_lambda_pt(gs, j, k, send_gap_abs(1)-1, p_pos_adim, bl_type, bl_tag, send_min, &
+            & send_max, scal, send_buffer, pos_in_buffer)
+    ! Observe that now:
+    ! => pos_in_buffer(i-1) = first (1D-)indice of the sub-array of send_buffer
+    ! associated to the i-th mpi process to which I will send remeshed particles.
+
+    ! ===== Wait for reception of all cartography =====
+    allocate(r_status(MPI_STATUS_SIZE,1:nb_r))
+    call mpi_waitall(nb_r,r_request_ran, r_status, ierr)
+    deallocate(r_request_ran)
+    deallocate(r_status)
+    !allocate(s_status(MPI_STATUS_SIZE,1:nb_s))
+    !allocate(ind_array(send_gap_abs(1):send_gap_abs(2)))
+    !call mpi_testsome(size(s_request_ran),s_request_ran, ind_1Dtable, ind_array, s_status, ierr)
+    !deallocate(ind_array)
+
+    ! ===== Finish the remeshing process =====
+    ! Send buffer, receive some other buffers and update scalar field.
+    call AC_remesh_finalize(direction, ind_group, gs, j, k, scal, send_gap_abs, rece_gap, &
+      & nb_r, nb_s, cartography, rece_carto, send_buffer, pos_in_buffer, min_size)
+
+
+    ! ===== Free memory and communication buffer ====
+    ! -- Deallocate all fields --
+    deallocate(rece_carto)
+    ! -- Check if Isend are done --
+    allocate(s_status(MPI_STATUS_SIZE,1:nb_s))
+    call mpi_waitall(nb_s, s_request_ran, s_status, ierr)
+    deallocate(s_status)
+    ! -- Free all communication buffer and data --
+    deallocate(send_buffer)
+    deallocate(pos_in_buffer)
+    deallocate(s_request_ran)
+
+end subroutine AC_remesh_via_buffer_lambda
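+
+! (Sketch of the buffer-based remeshing performed above, as pseudo-code; this
+!  is an illustrative summary of the calls, not additional functionality:
+!     1. exchange cartographies with the concerned processes  -> AC_remesh_init
+!     2. remesh locally into send_buffer and set scal = 0     -> remesh_in_buffer_lambda_pt
+!     3. send each sub-buffer to its target process, receive the buffers
+!        aimed at me and accumulate scal = scal + received buffer -> AC_remesh_finalize)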
+
+
+!> Use the input information to update the scalar field by creating particle
+!! weights (from the scalar values), setting the scalar to 0 and redistributing
+!! the particles inside it - variant for the corrected and limited lambda remeshing formulae.
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        gs          = size of groups (along X direction)
+!!    @param[in]        j,k         = indices of the current line (x-coordinate and z-coordinate)
+!!    @param[in]        p_pos_adim  = adimensionned  particles position
+!!    @param[in]        bl_type     = table of blocks type (center or left)
+!!    @param[in]        bl_tag      = informs about tagged particles (bl_tag(ind_bl)=1 if the end of the bl_ind-th block
+!!                                    and the beginning of the following one is tagged)
+!!    @param[in]        limit       = limitator function
+!!    @param[in,out]    scal        = scalar field to advect
+!!    @param[in]        send_min    = minimal indice of mesh involved in remeshing particles
+!!    @param[in]        send_max    = maximal indice of mesh involved in remeshing particles
+!!    @param[in]        send_gap_abs= send_gap_abs(i) is the min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+!!    @param[in]        rece_gap    = coordinate range of processes which will send me information during the remeshing.
+!!    @param[in,out]    cartography = cartography(proc_gap) contains the set of the line indices in the block for which the
+!!                                    current process requires data from proc_gap and, for each of these lines, the range
+!!                                    of mesh points from which it requires the velocity values.
+!! @details
+!!    This procedure manages all the needed communication. To minimize communications,
+!! particles are remeshed inside a local buffer which is afterwards sent to the
+!! process which owns the right sub-domain, depending on the particle
+!! position. No communication is needed in order to remesh inside the
+!! buffer. To avoid a recopy when creating the particle weights (which will be weight
+!! = scalar), the scalar is directly redistributed inside the local buffer.
+!! This proceeds in two steps:
+!!    a - Remesh the particles: redistribute the scalar field inside a local buffer and
+!!        set scalar = 0.
+!!    b - Send the local buffer to its target process and update the scalar field,
+!!        ie scalar = scalar + received buffer.
+!! "remesh_in_buffer_pt" does part "a" and "remesh_buffer_to_scalar" part
+!! "b", except for the communication. The current subroutine manages all the
+!! communications (and the other bookkeeping needed to ensure correctness).
+subroutine AC_remesh_via_buffer_limit_lambda(direction, ind_group, gs, p_pos_adim,  &
+        & j, k, scal, send_min, send_max, send_gap_abs, rece_gap, cartography,      &
+        & bl_type, bl_tag, limit)
+
+    use advec_variables         ! contains info about solver parameters and others.
+    use advec_abstract_proc     ! contain some useful procedure pointers.
+    use advecX                  ! procedure specific to advection alongX
+    use advecY                  ! procedure specific to advection alongY
+    use advecZ                  ! procedure specific to advection alongZ
+    use cart_topology           ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    integer, intent(in)                         :: direction
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: j, k
+    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! adimensionned particles position
+    logical, dimension(:,:,:), intent(in)       :: bl_type      ! is the particle block a center block or a left one ?
+    logical, dimension(:,:,:), intent(in)       :: bl_tag       ! flags tagged particles
+    real(WP), dimension(:,:,:), intent(in)      :: limit        ! limitator function (divided by 8.)
+    real(WP),dimension(:,:,:),intent(inout)     :: scal
+    integer, dimension(:,:), intent(in)         :: send_min     ! minimal indice of mesh involved in remeshing particles
+    integer, dimension(:,:), intent(in)         :: send_max     ! maximal indice of mesh involved in remeshing particles
+    integer, dimension(2), intent(in)           :: send_gap_abs ! min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+    integer, dimension(2, 2), intent(in)        :: rece_gap     ! distance between me and the processes which send me information
+    integer, dimension(:,:), intent(inout)      :: cartography  ! cartography(proc_gap) contains the set of the line indices in the block to which the
+                                                            ! current process will send data during remeshing and, for each of these lines, the range
+                                                            ! of mesh points from which it requires the velocity values.
+    ! Other local variables
+    ! Others
+    integer, dimension(:,:), allocatable    :: rece_carto   ! same as above but for what I receive
+    integer                                 :: min_size     ! minimal size of cartography(:,proc_gap)
+    integer                                 :: max_size     ! maximal size of cartography(:,proc_gap)
+    ! Variables used to remesh particles in a buffer
+    real(WP),dimension(:),allocatable,target:: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain,
+                                                            ! sorted by receivers and not by coordinate.
+    integer, dimension(:), allocatable      :: pos_in_buffer! first (1D-)indice of each process' sub-buffer inside send_buffer
+    ! Variables used to manage mpi communications
+    integer, dimension(:), allocatable      :: s_request_ran! mpi communication request (handle) of nonblocking send
+    integer, dimension(:), allocatable      :: r_request_ran! mpi communication request (handle) of nonblocking receive
+    integer, dimension(:,:), allocatable    :: r_status     ! mpi communication status of nonblocking receive
+    integer, dimension(:,:), allocatable    :: s_status     ! mpi communication status of nonblocking send
+    integer                                 :: ierr         ! mpi error code
+    integer                                 :: nb_r, nb_s   ! number of receptions/sends
+
+
+    ! ===== Allocation =====
+    ! -- allocate request about cartography (non-blocking) reception --
+    nb_r = rece_gap(1,2) - rece_gap(1,1) + 1
+    allocate(r_request_ran(1:nb_r))
+    r_request_ran = MPI_REQUEST_NULL
+    ! -- allocate cartography about what I receive --
+    max_size = 2 + gs(2)*(2+3*gs(1))
+    allocate(rece_carto(max_size,rece_gap(1,1):rece_gap(1,2)))
+    ! -- allocate request about cartography (non-blocking) send --
+    nb_s = send_gap_abs(2) - send_gap_abs(1) + 1
+    allocate(s_request_ran(1:nb_s))
+    ! -- To manage buffer --
+    ! Position of the sub-buffers associated with the different mpi processes
+    allocate(pos_in_buffer(0:nb_s))
+
+    ! ===== Init the remeshing process: pre-process  =====
+    ! Perform a cartography of the mesh points where particles will be remeshed,
+    ! create a 1D buffer where the remeshing will be performed and create the
+    ! tools to manage it.
+    call AC_remesh_init(direction, ind_group, gs, send_min, send_max, &
+        & send_gap_abs, rece_gap, nb_s, cartography, rece_carto,      &
+        & pos_in_buffer, min_size, max_size, s_request_ran, r_request_ran)
+
+
+    ! ===== Initialize the general buffer =====
+    allocate(send_buffer(pos_in_buffer(nb_s) &
+                & + cartography(1,nb_s)-1))
+    send_buffer = 0.0
+
+    ! ===== Remeshing into the buffer by using pointer array =====
+    call remesh_in_buffer_limit_lambda_pt(gs, j, k, send_gap_abs(1)-1, p_pos_adim, bl_type, bl_tag, limit,  &
+            & send_min, send_max, scal, send_buffer, pos_in_buffer)
+    ! Observe that now:
+    ! => pos_in_buffer(i-1) = first (1D-)indice of the sub-array of send_buffer
+    ! associated to the i-th mpi process to which I will send remeshed particles.
+
+    ! ===== Wait for reception of all cartography =====
+    allocate(r_status(MPI_STATUS_SIZE,1:nb_r))
+    call mpi_waitall(nb_r,r_request_ran, r_status, ierr)
+    deallocate(r_request_ran)
+    deallocate(r_status)
+    !allocate(s_status(MPI_STATUS_SIZE,1:nb_s))
+    !allocate(ind_array(send_gap_abs(1):send_gap_abs(2)))
+    !call mpi_testsome(size(s_request_ran),s_request_ran, ind_1Dtable, ind_array, s_status, ierr)
+    !deallocate(ind_array)
+
+    ! ===== Finish the remeshing process =====
+    ! Send buffer, receive some other buffers and update scalar field.
+    call AC_remesh_finalize(direction, ind_group, gs, j, k, scal, send_gap_abs, rece_gap, &
+      & nb_r, nb_s, cartography, rece_carto, send_buffer, pos_in_buffer, min_size)
+
+
+    ! ===== Free memory and communication buffer ====
+    ! -- Deallocate all fields --
+    deallocate(rece_carto)
+    ! -- Check if Isend are done --
+    allocate(s_status(MPI_STATUS_SIZE,1:nb_s))
+    call mpi_waitall(nb_s, s_request_ran, s_status, ierr)
+    deallocate(s_status)
+    ! -- Free all communication buffer and data --
+    deallocate(send_buffer)
+    deallocate(pos_in_buffer)
+    deallocate(s_request_ran)
+
+end subroutine AC_remesh_via_buffer_limit_lambda
+
+
+!> Use the input information to update the scalar field by creating particle
+!! weights (from the scalar values), setting the scalar to 0 and redistributing
+!! the particles inside it - variant for the M' remeshing formulae.
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        gs          = size of groups (along X direction)
+!!    @param[in]        j,k         = indices of the current line (x-coordinate and z-coordinate)
+!!    @param[in]        p_pos_adim  = adimensionned  particles position
+!!    @param[in,out]    scal        = scalar field to advect
+!!    @param[in]        send_min    = minimal indice of mesh involved in remeshing particles
+!!    @param[in]        send_max    = maximal indice of mesh involved in remeshing particles
+!!    @param[in]        send_gap_abs= send_gap_abs(i) is the min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+!!    @param[in]        rece_gap    = coordinate range of processes which will send me information during the remeshing.
+!!    @param[in,out]    cartography = cartography(proc_gap) contains the set of the line indices in the block for which the
+!!                                    current process requires data from proc_gap and, for each of these lines, the range
+!!                                    of mesh points from which it requires the velocity values.
+!! @details
+!!    This procedure manages all the needed communication. To minimize communications,
+!! particles are remeshed inside a local buffer which is afterwards sent to the
+!! process which owns the right sub-domain, depending on the particle
+!! position. No communication is needed in order to remesh inside the
+!! buffer. To avoid a recopy when creating the particle weights (which will be weight
+!! = scalar), the scalar is directly redistributed inside the local buffer.
+!! This proceeds in two steps:
+!!    a - Remesh the particles: redistribute the scalar field inside a local buffer and
+!!        set scalar = 0.
+!!    b - Send the local buffer to its target process and update the scalar field,
+!!        ie scalar = scalar + received buffer.
+!! "remesh_in_buffer_pt" does part "a" and "remesh_buffer_to_scalar" part
+!! "b", except for the communication. The current subroutine manages all the
+!! communications (and the other bookkeeping needed to ensure correctness).
+subroutine AC_remesh_via_buffer_Mprime(direction, ind_group, gs, p_pos_adim, &
+        & j, k, scal, send_min, send_max, send_gap_abs, rece_gap, cartography)
+
+    use advec_variables         ! contains info about solver parameters and others.
+    use advec_abstract_proc     ! contain some useful procedure pointers.
+    use advecX                  ! procedure specific to advection alongX
+    use advecY                  ! procedure specific to advection alongY
+    use advecZ                  ! procedure specific to advection alongZ
+    use cart_topology           ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    integer, intent(in)                         :: direction
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: j, k
+    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! adimensionned particles position
+    real(WP),dimension(:,:,:),intent(inout)     :: scal
+    integer, dimension(:,:), intent(in)         :: send_min     ! minimal indice of mesh involved in remeshing particles
+    integer, dimension(:,:), intent(in)         :: send_max     ! maximal indice of mesh involved in remeshing particles
+    integer, dimension(2), intent(in)           :: send_gap_abs ! min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+    integer, dimension(2, 2), intent(in)        :: rece_gap     ! distance between me and the processes which send me information
+    integer, dimension(:,:), intent(inout)      :: cartography  ! cartography(proc_gap) contains the set of the line indices in the block to which the
+                                                            ! current process will send data during remeshing and, for each of these lines, the range
+                                                            ! of mesh points from which it requires the velocity values.
+    ! Other local variables
+    ! Others
+    integer, dimension(:,:), allocatable    :: rece_carto   ! same as above but for what I receive
+    integer                                 :: min_size     ! minimal size of cartography(:,proc_gap)
+    integer                                 :: max_size     ! maximal size of cartography(:,proc_gap)
+    ! Variables used to remesh particles in a buffer
+    real(WP),dimension(:),allocatable,target:: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain,
+                                                            ! sorted by receivers and not by coordinate.
+    integer, dimension(:), allocatable      :: pos_in_buffer! first (1D-)indice of each process' sub-buffer inside send_buffer
+    ! Variables used to manage mpi communications
+    integer, dimension(:), allocatable      :: s_request_ran! mpi communication request (handle) of nonblocking send
+    integer, dimension(:), allocatable      :: r_request_ran! mpi communication request (handle) of nonblocking receive
+    integer, dimension(:,:), allocatable    :: r_status     ! mpi communication status of nonblocking receive
+    integer, dimension(:,:), allocatable    :: s_status     ! mpi communication status of nonblocking send
+    integer                                 :: ierr         ! mpi error code
+    integer                                 :: nb_r, nb_s   ! number of receptions/sends
+
+
+    ! ===== Allocation =====
+    ! -- allocate request about cartography (non-blocking) reception --
+    nb_r = rece_gap(1,2) - rece_gap(1,1) + 1
+    allocate(r_request_ran(1:nb_r))
+    r_request_ran = MPI_REQUEST_NULL
+    ! -- allocate cartography about what I receive --
+    max_size = 2 + gs(2)*(2+3*gs(1))
+    allocate(rece_carto(max_size,rece_gap(1,1):rece_gap(1,2)))
+    ! -- allocate request about cartography (non-blocking) send --
+    nb_s = send_gap_abs(2) - send_gap_abs(1) + 1
+    allocate(s_request_ran(1:nb_s))
+    ! -- To manage buffer --
+    ! Position of the sub-buffers associated with the different mpi processes
+    allocate(pos_in_buffer(0:nb_s))
+
+    ! ===== Init the remeshing process: pre-process  =====
+    ! Perform a cartography of the mesh points where particles will be remeshed,
+    ! create a 1D buffer where the remeshing will be performed and create the
+    ! tools to manage it.
+    call AC_remesh_init(direction, ind_group, gs, send_min, send_max, &
+        & send_gap_abs, rece_gap, nb_s, cartography, rece_carto,      &
+        & pos_in_buffer, min_size, max_size, s_request_ran, r_request_ran)
+
+
+    ! ===== Initialize the general buffer =====
+    allocate(send_buffer(pos_in_buffer(nb_s) &
+                & + cartography(1,nb_s)-1))
+    send_buffer = 0.0
+
+    ! ===== Remeshing into the buffer by using pointer array =====
+    call remesh_in_buffer_Mprime_pt(gs, j, k, send_gap_abs(1)-1, p_pos_adim, send_min, &
+            & send_max, scal, send_buffer, pos_in_buffer)
+    ! Observe that now:
+    ! => pos_in_buffer(i-1) = first (1D-)indice of the sub-array of send_buffer
+    ! associated to the i-th mpi process to which I will send remeshed particles.
+
+    ! ===== Wait for reception of all cartography =====
+    allocate(r_status(MPI_STATUS_SIZE,1:nb_r))
+    call mpi_waitall(nb_r,r_request_ran, r_status, ierr)
+    deallocate(r_request_ran)
+    deallocate(r_status)
+    !allocate(s_status(MPI_STATUS_SIZE,1:nb_s))
+    !allocate(ind_array(send_gap_abs(1):send_gap_abs(2)))
+    !call mpi_testsome(size(s_request_ran),s_request_ran, ind_1Dtable, ind_array, s_status, ierr)
+    !deallocate(ind_array)
+
+    ! ===== Finish the remeshing process =====
+    ! Send buffer, receive some other buffers and update scalar field.
+    call AC_remesh_finalize(direction, ind_group, gs, j, k, scal, send_gap_abs, rece_gap, &
+      & nb_r, nb_s, cartography, rece_carto, send_buffer, pos_in_buffer, min_size)
+
+
+    ! ===== Free memory and communication buffer ====
+    ! -- Deallocate all fields --
+    deallocate(rece_carto)
+    ! -- Check if Isend are done --
+    allocate(s_status(MPI_STATUS_SIZE,1:nb_s))
+    call mpi_waitall(nb_s, s_request_ran, s_status, ierr)
+    deallocate(s_status)
+    ! -- Free all communication buffer and data --
+    deallocate(send_buffer)
+    deallocate(pos_in_buffer)
+    deallocate(s_request_ran)
+
+end subroutine AC_remesh_via_buffer_Mprime
+
+
+! ==================================================================================
+! ====================     Other tools to remesh particles      ====================
+! ==================================================================================
+
+!> Determine where the particles of each line will be remeshed
+!!    @param[in]    bl_type         = equal 0 (resp 1) if the block is left (resp centered)
+!!    @param[in]    p_pos_adim      = adimensionned  particles position
+!!    @param[in]    direction       = current direction (1 = along X, 2 = along Y, 3 = along Z)
+!!    @param[out]   send_min        = minimal indice of mesh involved in remeshing particles (of the particles in my local subdomains)
+!!    @param[out]   send_max        = maximal indice of mesh involved in remeshing particles (of the particles in my local subdomains)
+!!    @param[out]   send_gap        = distance between me and the processes to which I will send information (for each line of the group)
+!!    @param[out]   send_gap_abs    = send_gap_abs(i) is the min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+subroutine AC_remesh_range(bl_type, p_pos_adim, direction, send_min, send_max, send_gap, send_gap_abs)
+
+    use advec_variables         ! contains info about solver parameters and others.
+    use cart_topology     ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    logical, dimension(:,:,:), intent(in)   :: bl_type      ! is the particle block a center block or a left one ?
+    real(WP), dimension(:,:,:), intent(in)  :: p_pos_adim   ! adimensionned particles position
+    integer, intent(in)                     :: direction
+    integer, dimension(:,:), intent(out)    :: send_min     ! minimal indice of mesh involved in remeshing particles
+    integer, dimension(:,:), intent(out)    :: send_max     ! maximal indice of mesh involved in remeshing particles
+    integer, dimension(:,:,:), intent(out)  :: send_gap     ! distance between me and the processes to which I send information
+    integer, dimension(2), intent(out)      :: send_gap_abs ! min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+
+    !  -- Compute ranges --
+    where (bl_type(1,:,:))
+        ! First particle is a centered one
+        send_min = nint(p_pos_adim(1,:,:))-remesh_stencil(1)
+    elsewhere
+        ! First particle is a left one
+        send_min = floor(p_pos_adim(1,:,:))-remesh_stencil(1)
+    end where
+    where (bl_type(bl_nb(direction)+1,:,:))
+        ! Last particle is a centered one
+        send_max = nint(p_pos_adim(mesh_sc%N_proc(direction),:,:))+remesh_stencil(2)
+    elsewhere
+        ! Last particle is a left one
+        send_max = floor(p_pos_adim(mesh_sc%N_proc(direction),:,:))+remesh_stencil(2)
+    end where
+
+    ! -- What have I to communicate ? --
+    send_gap(:,:,1) = floor(real(send_min-1, WP)/mesh_sc%N_proc(direction))
+    send_gap(:,:,2) = floor(real(send_max-1, WP)/mesh_sc%N_proc(direction))
+    send_gap_abs(1) = minval(send_gap(:,:,1))
+    send_gap_abs(2) = maxval(send_gap(:,:,2))
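+
+    ! Worked example (illustrative): with mesh_sc%N_proc(direction) = 32, a line
+    ! whose remeshing range is send_min = -3 and send_max = 35 gives
+    ! send_gap(:,:,1) = floor(-4./32.) = -1 and send_gap(:,:,2) = floor(34./32.) = 1,
+    ! i.e. this line remeshes particles on the previous, the current and the
+    ! next process along the current direction.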
+
+end subroutine AC_remesh_range
+
+
+!> Determine where the particles of each line will be remeshed - Variant for
+!! remeshing without type/tag
+!!    @param[in]    p_pos_adim      = adimensionned  particles position
+!!    @param[in]    direction       = current direction (1 = along X, 2 = along Y, 3 = along Z)
+!!    @param[out]   send_min        = minimal indice of mesh involved in remeshing particles (of the particles in my local subdomains)
+!!    @param[out]   send_max        = maximal indice of mesh involved in remeshing particles (of the particles in my local subdomains)
+!!    @param[out]   send_gap        = distance between me and the processes to which I will send information (for each line of the group)
+!!    @param[out]   send_gap_abs    = send_gap_abs(i) is the min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+subroutine AC_remesh_range_notype(p_pos_adim, direction, send_min, send_max, send_gap, send_gap_abs)
+
+    use advec_variables         ! contains info about solver parameters and others.
+    use cart_topology     ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    real(WP), dimension(:,:,:), intent(in)  :: p_pos_adim   ! adimensionned particles position
+    integer, intent(in)                     :: direction
+    integer, dimension(:,:), intent(out)    :: send_min     ! minimal indice of mesh involved in remeshing particles
+    integer, dimension(:,:), intent(out)    :: send_max     ! maximal indice of mesh involved in remeshing particles
+    integer, dimension(:,:,:), intent(out)  :: send_gap     ! distance between me and the processes to which I send information
+    integer, dimension(2), intent(out)      :: send_gap_abs ! min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+
+    !  -- Compute ranges --
+    send_min = floor(p_pos_adim(1,:,:))-remesh_stencil(1)
+    send_max = floor(p_pos_adim(mesh_sc%N_proc(direction),:,:))+remesh_stencil(2)
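+    ! (Note, inferred from AC_remesh_range above: without block types every
+    ! range bound is computed as for a 'left' block, hence the plain floor()
+    ! on both the first and the last particle of the line.)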
+
+    ! -- What have I to communicate ? --
+    send_gap(:,:,1) = floor(real(send_min-1, WP)/mesh_sc%N_proc(direction))
+    send_gap(:,:,2) = floor(real(send_max-1, WP)/mesh_sc%N_proc(direction))
+    send_gap_abs(1) = minval(send_gap(:,:,1))
+    send_gap_abs(2) = maxval(send_gap(:,:,2))
+
+end subroutine AC_remesh_range_notype
+
+
+!> Determine the set of processes which will send me information during the remeshing
+!! and compute, for each of these processes, the range of wanted data. Use implicit
+!! computation rather than communication (only possible if particles are gathered in
+!! blocks with constraints on the velocity variation - as for the corrected lambda
+!! formulae) - works directly on a group of particle lines.
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y, 3 = along Z)
+!!    @param[in]        gs          = size of group of line along the current direction
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        remesh_min  =  minimal indice of meshes where I will remesh my particles.
+!!    @param[in]        remesh_max  =  maximal indice of meshes where I will remesh my particles.
+!!    @param[out]       rece_gap    = coordinate range of processes which will send me information during the remeshing.
+!!    @param[in]        send_gap    = distance between me and the processes to which I will send information (for each line of the group)
+!!    @param[in]        send_gap_abs= send_gap_abs(i) is the min (resp max) value of send_gap(:,:,i) with i=1 (resp 2)
+!!    @param[in,out]    cartography = cartography(proc_gap) contains the set of the line indices in the block for which the
+!!                                    current process requires data from proc_gap and, for each of these lines, the range
+!!                                    of mesh points from which it requires the velocity values.
+!! @details
+!!    Works on a group of lines of size gs(1) x gs(2).
+!!    Obtain the list of processes which are associated to the sub-domains where my particles
+!!    will be remeshed, and the list of processes which contain particles that
+!!    have to be remeshed in my sub-domain. This way, this procedure determines
+!!    which processes need to communicate together in order to proceed with the
+!!    remeshing (as in a parallel context the real space is subdivided and each
+!!    process contains a part of it).
+!!        At the same time, it computes, for each process with which I will
+!!    communicate, the range of mesh points involved for each line of particles
+!!    inside the group, and it stores it using some sparse matrix techniques
+!!    (see the cartography defined in the algorithm documentation).
+!!        This routine does not involve any communication to determine if
+!!    a process is the first or the last one (considering its coordinate along
+!!    the current direction) to send remeshing information to a given process.
+!!    It directly computes this using the constraints on velocity (as in the
+!!    corrected lambda scheme). When possible, use it rather than AC_obtain_senders_com.
+subroutine AC_remesh_determine_communication(direction, gs, ind_group, remesh_min, remesh_max, &
+    & rece_gap, send_gap, send_gap_abs, cartography)
+! XXX Works only for periodic conditions. For Dirichlet conditions: it is
+! possible not to receive on the rece_gap(1) side, not to receive on the
+! rece_gap(2) side, or to receive on neither of the two => detect it (track the first and the last particles) and deal with it.
+
+    use cart_topology   ! info about mesh and mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/output
+    integer, intent(in)                                 :: direction
+    integer, dimension(2), intent(in)                   :: gs           ! a group size
+    integer, dimension(2), intent(in)                   :: ind_group
+    integer, dimension(:,:), intent(in)                 :: remesh_min   ! minimal indice of meshes where I will remesh my particles.
+    integer, dimension(:,:), intent(in)                 :: remesh_max   ! maximal indice of meshes where I will remesh my particles.
+    integer, dimension(2, 2), intent(out)               :: rece_gap
+    integer(kind=4), dimension(gs(1),gs(2),2),intent(in):: send_gap     ! minimal and maximal processes which contain the sub-domains where my
+                                                                        ! particles will be remeshed, for each line of the line group
+    integer, dimension(2), intent(in)                   :: send_gap_abs ! minimal and maximal processes which contain the sub-domains where my particles will be remeshed.
+    integer, dimension(2+gs(2)*(2+3*gs(1)), &
+        & send_gap_abs(1):send_gap_abs(2)), intent(out) :: cartography
+
+    ! To manage communications and to localize sub-domain
+    integer(kind=4)                         :: proc_gap         ! gap between a process coordinate (along the current
+                                                                ! direction) in the mpi topology and my coordinate
+    integer, dimension(2)                   :: tag_table        ! mpi message tags (to communicate rece_gap(1) and rece_gap(2))
+    integer, dimension(:,:),allocatable     :: send_request     ! mpi request handles of nonblocking sends
+    integer                                 :: ierr             ! mpi error code
+    integer, dimension(MPI_STATUS_SIZE)     :: statut           ! mpi status
+    ! To determine which process is the first/last to send data to another
+    integer, dimension(:,:), allocatable    :: first, last      ! storage of the processes to which I am the first (resp. the last)
+                                                                ! to send remeshed particles
+    integer                                 :: first_condition  ! threshold on remesh_min below which I am the first sender
+    integer                                 :: last_condition   ! threshold on remesh_max above which I am the last sender
+    ! Other local variables
+    integer                                 :: ind1, ind2       ! indices of the current line inside the group
+    integer                                 :: min_size         ! first index in the cartography where the line indices along the first dimension of the group are stored
+    integer                                 :: gp_size          ! group size
+    integer,dimension(2)                    :: rece_buffer      ! reception buffer used to update rece_gap
+    logical                                 :: begin_interval   ! are we at the start of an interval?
+
+    rece_gap(1,1) = 3*mesh_sc%N(direction)
+    rece_gap(1,2) = -3*mesh_sc%N(direction)
+    rece_gap(2,:) = 0
+    gp_size = gs(1)*gs(2)
+
+    allocate(send_request(send_gap_abs(1):send_gap_abs(2),3))
+    send_request(:,3) = 0
+
+    ! ===== Compute if I am first or last and determine the cartography =====
+    min_size = 2 + gs(2)
+    ! Initialize first and last to determine if I am the first or the last process (considering the current direction)
+    ! to send remeshing information to a given process
+    allocate(first(2,send_gap_abs(1):send_gap_abs(2)))
+    first(2,:) = 0  ! number of lines for which I am the first
+    allocate(last(2,send_gap_abs(1):send_gap_abs(2)))
+    last(2,:) = 0   ! number of lines for which I am the last
+    ! Initialize cartography
+    cartography(1,:) = 0            ! number of velocity values to receive
+    cartography(2,:) = min_size     ! number of elements to send when sending the cartography
+    ! And compute cartography, first and last !
+    do proc_gap = send_gap_abs(1), send_gap_abs(2)
+        first(1,proc_gap) = -proc_gap
+        last(1,proc_gap) = -proc_gap
+        first_condition =  1-2*bl_bound_size + proc_gap*mesh_sc%N_proc(direction)+1
+        last_condition  = -1+2*bl_bound_size + (proc_gap+1)*mesh_sc%N_proc(direction)
+        do ind2 = 1, gs(2)
+            cartography(2+ind2,proc_gap) = 0    ! 2 x number of intervals of concerned lines in column ind2
+            begin_interval = .true.
+            do ind1 = 1, gs(1)
+                ! Does proc_gap belong to [send_gap(ind1,ind2,1); send_gap(ind1,ind2,2)]?
+                if((proc_gap>=send_gap(ind1,ind2,1)).and.(proc_gap<=send_gap(ind1,ind2,2))) then
+                    ! Compute if I am the first.
+                    if (remesh_min(ind1,ind2)< first_condition) first(2,proc_gap) =  first(2,proc_gap)+1
+                    ! Compute if I am the last.
+                    if (remesh_max(ind1,ind2) > last_condition) last(2,proc_gap) =  last(2,proc_gap)+1
+                    ! Update cartography. Needed even if the target process is myself, as a buffer is used
+                    ! in every case (the scalar field cannot be used directly during the remeshing)
+                    if (begin_interval) then
+                        cartography(2+ind2,proc_gap) =  cartography(2+ind2,proc_gap)+2
+                        cartography(cartography(2,proc_gap)+1,proc_gap) = ind1
+                        cartography(2,proc_gap) = cartography(2,proc_gap) + 2
+                        cartography(cartography(2,proc_gap),proc_gap) = ind1
+                        begin_interval = .false.
+                    else
+                        cartography(cartography(2,proc_gap),proc_gap) = ind1
+                    end if
+                else
+                    begin_interval = .true.
+                end if
+            end do
+        end do
+    end do
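+    ! Illustrative note (a sketch with hypothetical numbers, not extra solver
+    ! logic): with mesh_sc%N_proc(direction) = 16, bl_bound_size = 1 and
+    ! proc_gap = 1, the thresholds above are first_condition = 1-2+16+1 = 16
+    ! and last_condition = -1+2+32 = 33. A line whose remeshing interval starts
+    ! below mesh index 16 makes me the first sender to that process, and one
+    ! ending above mesh index 33 makes me the last.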
+
+    ! ===== Send information about first and last  =====
+    tag_table = compute_tag(ind_group, tag_obtsend_NP, direction)
+    do proc_gap = send_gap_abs(1), send_gap_abs(2)
+        ! Am I the first?
+        if (first(2,proc_gap)>0) then
+            if(neighbors(direction,proc_gap)/= D_rank(direction)) then
+                call mpi_ISsend(first(1,proc_gap), 2, MPI_INTEGER, neighbors(direction,proc_gap), &
+                        & tag_table(1), D_comm(direction), send_request(proc_gap,1), ierr)
+                send_request(proc_gap,3) = 1
+            else
+                rece_gap(1,1) = min(rece_gap(1,1), -proc_gap)
+                rece_gap(2,1) = rece_gap(2,1) + first(2,proc_gap)
+            end if
+        end if
+        ! Am I the last?
+        if (last(2,proc_gap)>0) then
+            if(neighbors(direction,proc_gap)/= D_rank(direction)) then
+                call mpi_ISsend(last(1,proc_gap), 2, MPI_INTEGER, neighbors(direction,proc_gap), &
+                        & tag_table(2), D_comm(direction), send_request(proc_gap,2), ierr)
+                send_request(proc_gap,3) = send_request(proc_gap, 3) + 2
+            else
+                rece_gap(1,2) = max(rece_gap(1,2), -proc_gap)
+                rece_gap(2,2) = rece_gap(2,2) + last(2,proc_gap)
+            end if
+        end if
+    end do
+
+    ! ===== Receive information from the first and the last processes which need a part of my local velocity field =====
+    do while(rece_gap(2,1) < gp_size)
+        call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
+        rece_gap(1,1) = min(rece_gap(1,1), rece_buffer(1))
+        rece_gap(2,1) = rece_gap(2,1) + rece_buffer(2)
+    end do
+    do while(rece_gap(2,2) < gp_size)
+        call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
+        rece_gap(1,2) = max(rece_gap(1,2), rece_buffer(1))
+        rece_gap(2,2) = rece_gap(2,2) + rece_buffer(2)
+    end do
+
+    ! ===== Free Isend buffer =====
+    do proc_gap = send_gap_abs(1), send_gap_abs(2)
+        select case (send_request(proc_gap,3))
+            case (3)
+                call mpi_wait(send_request(proc_gap,1), statut, ierr)
+                call mpi_wait(send_request(proc_gap,2), statut, ierr)
+            case (2)
+                call mpi_wait(send_request(proc_gap,2), statut, ierr)
+            case (1)
+                call mpi_wait(send_request(proc_gap,1), statut, ierr)
+        end select
+    end do
+
+    ! ===== Deallocate fields =====
+    deallocate(send_request)
+    deallocate(first)
+    deallocate(last)
+
+end subroutine AC_remesh_determine_communication
+
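+! Illustration of the cartography layout built above (a sketch with hypothetical
+! numbers, not part of the solver): assume a group gs = (/3,2/) in which lines
+! 1-2 of column 1 and line 3 of column 2 concern the process at gap proc_gap.
+! With min_size = 2+gs(2) = 4, the loops yield
+!   cartography(3,proc_gap) = 2               ! one interval in column 1
+!   cartography(4,proc_gap) = 2               ! one interval in column 2
+!   cartography(5:6,proc_gap) = (/1, 2/)      ! lines 1 to 2 of column 1
+!   cartography(7:8,proc_gap) = (/3, 3/)      ! line 3 of column 2
+!   cartography(2,proc_gap) = 8               ! number of integers stored so far
+! The mesh-point range of each interval is appended later by AC_remesh_cartography,
+! which also fills cartography(1,proc_gap) with the number of values to exchange.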
+
+!> Determine the set of processes which will send me information during the remeshing
+!! and compute, for each of these processes, the range of wanted data. Version for the M'6
+!! scheme (some simplifications can no longer be made).
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y, 3 = along Z)
+!!    @param[in]        gs          = size of the group of lines along the current direction
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[out]       rece_gap    = coordinate range of the processes which will send me information during the remeshing.
+!!    @param[in]        send_gap    = distance between me and the processes to which I will send information (for each line of the group)
+!!    @param[in]        send_gap_abs= send_gap_abs(i) is the min (resp. max) value of send_gap(:,:,i) for i=1 (resp. i=2)
+!!    @param[in,out]    cartography = cartography(:,proc_gap) contains the set of line indices in the block for which the
+!!                                    current process requires data from the process at gap proc_gap and, for each of these
+!!                                    lines, the range of mesh points from which it requires the velocity values.
+!! @details
+!!    Works on a group of lines of size gs(1) x gs(2).
+!!    It obtains the list of processes associated to the sub-domains where my
+!!    particles will be remeshed, and the list of processes containing particles
+!!    which have to be remeshed in my sub-domain. This way, the procedure
+!!    determines which processes need to communicate with each other in order
+!!    to perform the remeshing (in a parallel context the physical space is
+!!    subdivided and each process owns a part of it).
+!!        At the same time, it computes, for each process I will communicate
+!!    with, the range of mesh points involved for each line of particles inside
+!!    the group, and stores it using a sparse-matrix technique
+!!    (see the cartography described in the algorithm documentation).
+!!        This routine involves communication to determine whether a process is
+!!    the first or the last one (considering its coordinate along the current
+!!    direction) to send remeshing information to a given process.
+subroutine AC_remesh_determine_communication_com(direction, gs, ind_group, &
+    & rece_gap, send_gap, send_gap_abs, cartography)
+! XXX Works only for periodic boundary conditions.
+
+    use cart_topology   ! info about mesh and mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/output
+    integer, intent(in)                                 :: direction
+    integer, dimension(2), intent(in)                   :: gs           ! a group size
+    integer, dimension(2), intent(in)                   :: ind_group
+    integer, dimension(2, 2), intent(out)               :: rece_gap     ! range of the processes which will remesh particles inside my sub-domain
+    integer(kind=4), dimension(gs(1),gs(2),2),intent(in):: send_gap     ! minimal and maximal process gaps containing the sub-domains where my
+                                                                        ! particles will be remeshed, for each line of the group
+    integer, dimension(2), intent(in)                   :: send_gap_abs ! min and max process gaps containing the sub-domains where my particles will be remeshed.
+    integer, dimension(2+gs(2)*(2+3*gs(1)), &
+        & send_gap_abs(1):send_gap_abs(2)), intent(out) :: cartography
+
+    ! To manage communications and to localize sub-domain
+    integer(kind=4)                         :: proc_gap         ! gap between a process coordinate (along the current
+                                                                ! direction) in the mpi topology and my coordinate
+    integer, dimension(2)                   :: tag_table        ! mpi message tags (to communicate rece_gap(1) and rece_gap(2))
+    integer, dimension(:,:),allocatable     :: send_request     ! mpi request handles of nonblocking sends
+    integer                                 :: ierr             ! mpi error code
+    integer, dimension(MPI_STATUS_SIZE)     :: statut           ! mpi status
+    ! To determine which process is the first/last to send data to another
+    integer, dimension(gs(1), gs(2))        :: send_max_prev    ! maximum gap between the previous process and the receivers of its remeshing buffer
+    integer, dimension(gs(1), gs(2))        :: send_min_next    ! minimum gap between the next process and the receivers of its remeshing buffer
+    integer, dimension(:,:), allocatable    :: first, last      ! storage of the processes to which I am the first (resp. the last)
+                                                                ! to send remeshed particles
+    ! Other local variable
+    integer                                 :: ind1, ind2       ! indices of the current line inside the group
+    integer                                 :: min_size         ! first index in the cartography where the line indices along the first dimension of the group are stored
+    integer                                 :: gp_size          ! group size
+    integer,dimension(2)                    :: rece_buffer      ! reception buffer used to update rece_gap
+    logical                                 :: begin_interval   ! are we at the start of an interval?
+
+    rece_gap(1,1) = 3*mesh_sc%N(direction)
+    rece_gap(1,2) = -3*mesh_sc%N(direction)
+    rece_gap(2,:) = 0
+    gp_size = gs(1)*gs(2)
+
+    allocate(send_request(send_gap_abs(1):send_gap_abs(2),3))
+    send_request(:,3) = 0
+
+    ! ===== Exchange ghosts =====
+    ! Compute message tag - we re-use the tag_part_tag_NP id, as using this procedure
+    ! supposes that "AC_type_and_block" is not used
+    tag_table = compute_tag(ind_group, tag_part_tag_NP, direction)
+    ! Exchange "ghost"
+    call mpi_Sendrecv(send_gap(1,1,1), gp_size, MPI_INTEGER, neighbors(direction,-1), tag_table(1), &
+            & send_min_next(1,1), gp_size, MPI_INTEGER, neighbors(direction,1), tag_table(1),    &
+            & D_comm(direction), statut, ierr)
+    call mpi_Sendrecv(send_gap(1,1,2), gp_size, MPI_INTEGER, neighbors(direction,1), tag_table(2), &
+            & send_max_prev(1,1), gp_size, MPI_INTEGER, neighbors(direction,-1), tag_table(2),    &
+            & D_comm(direction), statut, ierr)
+    ! Translate to adapt the gaps to my position
+    send_max_prev = send_max_prev - 1
+    send_min_next = send_min_next + 1
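+    ! Illustrative note (an interpretation sketch, not extra solver logic): after
+    ! this translation, I am the first sender to the process at gap proc_gap for
+    ! a given line iff proc_gap > send_max_prev, i.e. the previous process does
+    ! not already send to that target; symmetrically, I am the last sender iff
+    ! proc_gap < send_min_next. These are exactly the tests used in the loop below.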
+
+    ! ===== Compute if I am first or last and determine the cartography =====
+    min_size = 2 + gs(2)
+    ! Initialize first and last to determine if I am the first or the last process (considering the current direction)
+    ! to send remeshing information to a given process
+    allocate(first(2,send_gap_abs(1):send_gap_abs(2)))
+    first(2,:) = 0  ! number of lines for which I am the first
+    allocate(last(2,send_gap_abs(1):send_gap_abs(2)))
+    last(2,:) = 0   ! number of lines for which I am the last
+    ! Initialize cartography
+    cartography(1,:) = 0            ! number of velocity values to receive
+    cartography(2,:) = min_size     ! number of elements to send when sending the cartography
+    ! And compute cartography, first and last !
+    do proc_gap = send_gap_abs(1), send_gap_abs(2)
+        first(1,proc_gap) = -proc_gap
+        last(1,proc_gap) = -proc_gap
+        do ind2 = 1, gs(2)
+            cartography(2+ind2,proc_gap) = 0    ! 2 x number of intervals of concerned lines in column ind2
+            begin_interval = .true.
+            do ind1 = 1, gs(1)
+                ! Does proc_gap belong to [send_gap(ind1,ind2,1); send_gap(ind1,ind2,2)]?
+                if((proc_gap>=send_gap(ind1,ind2,1)).and.(proc_gap<=send_gap(ind1,ind2,2))) then
+                    ! Compute if I am the first.
+                    if(proc_gap > send_max_prev(ind1,ind2)) first(2,proc_gap) =  first(2,proc_gap)+1
+                    ! Compute if I am the last.
+                    if(proc_gap < send_min_next(ind1,ind2)) last(2,proc_gap) =  last(2,proc_gap)+1
+                    ! Update cartography. Needed even if the target process is myself, as a buffer is used
+                    ! in every case (the scalar field cannot be used directly during the remeshing)
+                    if (begin_interval) then
+                        cartography(2+ind2,proc_gap) =  cartography(2+ind2,proc_gap)+2
+                        cartography(cartography(2,proc_gap)+1,proc_gap) = ind1
+                        cartography(2,proc_gap) = cartography(2,proc_gap) + 2
+                        cartography(cartography(2,proc_gap),proc_gap) = ind1
+                        begin_interval = .false.
+                    else
+                        cartography(cartography(2,proc_gap),proc_gap) = ind1
+                    end if
+                else
+                    begin_interval = .true.
+                end if
+            end do
+        end do
+    end do
+
+    ! ===== Send information about first and last  =====
+    tag_table = compute_tag(ind_group, tag_obtsend_NP, direction)
+    do proc_gap = send_gap_abs(1), send_gap_abs(2)
+        ! Am I the first?
+        if (first(2,proc_gap)>0) then
+            if(neighbors(direction,proc_gap)/= D_rank(direction)) then
+                call mpi_ISsend(first(1,proc_gap), 2, MPI_INTEGER, neighbors(direction,proc_gap), &
+                        & tag_table(1), D_comm(direction), send_request(proc_gap,1), ierr)
+                send_request(proc_gap,3) = 1
+            else
+                rece_gap(1,1) = min(rece_gap(1,1), -proc_gap)
+                rece_gap(2,1) = rece_gap(2,1) + first(2,proc_gap)
+            end if
+        end if
+        ! Am I the last?
+        if (last(2,proc_gap)>0) then
+            if(neighbors(direction,proc_gap)/= D_rank(direction)) then
+                call mpi_ISsend(last(1,proc_gap), 2, MPI_INTEGER, neighbors(direction,proc_gap), &
+                        & tag_table(2), D_comm(direction), send_request(proc_gap,2), ierr)
+                send_request(proc_gap,3) = send_request(proc_gap, 3) + 2
+            else
+                rece_gap(1,2) = max(rece_gap(1,2), -proc_gap)
+                rece_gap(2,2) = rece_gap(2,2) + last(2,proc_gap)
+            end if
+        end if
+    end do
+
+    ! ===== Receive information from the first and the last processes which need a part of my local velocity field =====
+    do while(rece_gap(2,1) < gp_size)
+        call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
+        rece_gap(1,1) = min(rece_gap(1,1), rece_buffer(1))
+        rece_gap(2,1) = rece_gap(2,1) + rece_buffer(2)
+    end do
+    do while(rece_gap(2,2) < gp_size)
+        call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
+        rece_gap(1,2) = max(rece_gap(1,2), rece_buffer(1))
+        rece_gap(2,2) = rece_gap(2,2) + rece_buffer(2)
+    end do
+
+    ! ===== Free Isend buffer =====
+    do proc_gap = send_gap_abs(1), send_gap_abs(2)
+        select case (send_request(proc_gap,3))
+            case (3)
+                call mpi_wait(send_request(proc_gap,1), statut, ierr)
+                call mpi_wait(send_request(proc_gap,2), statut, ierr)
+            case (2)
+                call mpi_wait(send_request(proc_gap,2), statut, ierr)
+            case (1)
+                call mpi_wait(send_request(proc_gap,1), statut, ierr)
+        end select
+    end do
+
+    ! ===== Deallocate fields =====
+    deallocate(send_request)
+    deallocate(first)
+    deallocate(last)
+
+end subroutine AC_remesh_determine_communication_com
+
+
+!> Update the cartography of the data which will be exchanged between processes in order to remesh particles.
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y, 3 = along Z)
+!!    @param[in]        gs          = size of the group of lines along the current direction
+!!    @param[in]        begin_i1    = index of the first place in the cartography array where the
+!!                                      indices along the first direction of the group of lines are stored.
+!!    @param[in]        proc_gap    = distance between my (mpi) coordinate and the coordinate of the target process
+!!    @param[in]        ind_carto   = current column inside the cartography (differs from proc_gap: in this procedure
+!!                                    the first index is 1, as the cartography range is not passed as an argument)
+!!    @param[in]        send_min    = minimal index of the meshes involved in remeshing the particles of my local sub-domain
+!!    @param[in]        send_max    = maximal index of the meshes involved in remeshing the particles of my local sub-domain
+!!    @param[in,out]    cartography = cartography(:,proc_gap) contains the set of line indices in the block for which the
+!!                                    current process requires data from the process at gap proc_gap and, for each of these
+!!                                    lines, the range of mesh points from which it requires the velocity values.
+!!    @param[out]       com_size    = number of elements (integers) stored in the cartography (which will be the size of the corresponding mpi message)
+subroutine AC_remesh_cartography(direction, gs, begin_i1, proc_gap, ind_carto, send_min, send_max, cartography, com_size)
+
+    use cart_topology           ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    integer, intent(in)                     :: direction
+    integer, dimension(2), intent(in)       :: gs
+    integer, intent(in)                     :: begin_i1     ! index of the first place in the cartography
+                                                            ! array where the indices along the first direction of the
+                                                            ! group of lines are stored.
+    integer, intent(in)                     :: proc_gap     ! distance between my (mpi) coordinate and the coordinate of the target process
+    integer, intent(in)                     :: ind_carto    ! current column inside the cartography (differs from proc_gap: in this
+                                                            ! procedure the first index is 1, as the cartography range is not passed as an argument)
+    integer, dimension(:,:), intent(in)     :: send_min     ! minimal index of the meshes involved in remeshing my particles (for each line)
+    integer, dimension(:,:), intent(in)     :: send_max     ! maximal index of the meshes involved in remeshing my particles (for each line)
+    integer, dimension(:,:), intent(inout)  :: cartography
+    integer, intent(out)                    :: com_size     ! number of elements (integers) stored in the cartography (which will
+                                                            ! be the size of the corresponding mpi message)
+
+    ! Other local variables
+    integer                                 :: gap          ! gap between my local indices and the local indices of another process
+    integer                                 :: i1, i2       ! indices of a line inside the group
+    integer                                 :: ind_for_i1   ! where to read the first coordinate (i1) of the current line inside the cartography
+    integer                                 :: ind_1Dtable  ! index of my current position inside a one-dimensional table
+
+    cartography(1,ind_carto) = 0
+    ! Use the cartography to know which lines are concerned
+    com_size = cartography(2,ind_carto)
+    ! Range I want - store into the cartography
+    gap = proc_gap*mesh_sc%N_proc(direction)
+    ! Position in cartography(:,ind_carto) of the current i1 indice
+    ind_for_i1 = begin_i1
+    do i2 = 1, gs(2)
+        do ind_1Dtable = ind_for_i1+1, ind_for_i1 + cartography(2+i2,ind_carto), 2
+            do i1 = cartography(ind_1Dtable,ind_carto), cartography(ind_1Dtable+1,ind_carto)
+                ! The interval starts from:
+                cartography(com_size+1,ind_carto) = max(send_min(i1,i2), gap+1) ! +1 as Fortran indices start from 1
+                ! and ends at:
+                cartography(com_size+2,ind_carto) = min(send_max(i1,i2), gap+mesh_sc%N_proc(direction))
+                ! update the number of elements to send
+                cartography(1,ind_carto) = cartography(1,ind_carto) &
+                            & + cartography(com_size+2,ind_carto) &
+                            & - cartography(com_size+1,ind_carto) + 1
+                com_size = com_size+2
+            end do
+        end do
+        ind_for_i1 = ind_for_i1 + cartography(2+i2,ind_carto)
+    end do
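+    ! Worked example (hypothetical values): with mesh_sc%N_proc(direction) = 16
+    ! and proc_gap = 1, gap = 16; a line with send_min = 14 and send_max = 20
+    ! stores the clipped interval [max(14,16+1), min(20,16+16)] = [17,20] in the
+    ! cartography, i.e. 4 mesh values counted into cartography(1,ind_carto).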
+
+end subroutine AC_remesh_cartography
+
+
+!> Perform all the pre-processing needed to remesh the particles and to perform the associated communications.
+!! @details
+!!     As the geometric domain is subdivided among the different mpi processes, the
+!! particle remeshing involves mpi communication in order to redistribute the
+!! particle weights to the right place.
+!!     In order to gather these communications for different particle lines,
+!! the particle remeshing is performed in a buffer. The buffer is a 1D array
+!! whose structure ensures that all the values that have to be sent to a given
+!! process are contiguous in memory.
+!!     This subroutine creates this buffer and provides a map to manage it. This
+!! map associates an XYZ coordinate (in the geometrical domain) to each
+!! element of this 1D array.
+subroutine AC_remesh_init(direction, ind_group, gs, send_min, send_max, &
+    & send_gap_abs, rece_gap, nb_s, cartography, rece_carto,            &
+    & pos_in_buffer, min_size, max_size, s_request_ran, r_request_ran)
+
+    use cart_topology     ! Description of mesh and of mpi topology
+    use advec_variables         ! contains info about solver parameters and others.
+
+    ! Input/Output
+    integer, intent(in)                     :: direction
+    integer, dimension(2), intent(in)       :: ind_group
+    integer, dimension(2), intent(in)       :: gs
+    integer, dimension(:,:), intent(in)     :: send_min     ! minimal index of the meshes involved in remeshing my particles (for each line of particles)
+    integer, dimension(:,:), intent(in)     :: send_max     ! maximal index of the meshes involved in remeshing my particles
+    integer, dimension(2), intent(in)       :: send_gap_abs ! min (resp. max) value of send_gap(:,:,i) for i=1 (resp. i=2)
+    integer, dimension(2, 2), intent(in)    :: rece_gap     ! range of the processes which will send me information during the remeshing
+    integer, intent(in)                     :: nb_s         ! number of sends (number of target processes)
+    integer, dimension(:,:), intent(inout)  :: cartography  ! cartography(:,i) contains the set of line indices in the block for which the
+                                                            ! current process will send data to the i-th target during the remeshing and,
+                                                            ! for each of these lines, the range of mesh points involved
+    integer, dimension(:,:), intent(inout)  :: rece_carto   ! same as above, but for what I receive
+    integer,dimension(0:nb_s),intent(inout) :: pos_in_buffer! information about the organization of the 1D buffer used to remesh
+                                                            ! a 3D set of particles.
+    integer, intent(out)                    :: min_size     ! tool to manage the cartography
+    integer, intent(in)                     :: max_size     ! tool to manage the cartography
+    integer, dimension(:), intent(inout)    :: s_request_ran! mpi communication requests (handles) of nonblocking sends
+    integer, dimension(:), intent(inout)    :: r_request_ran! mpi communication requests (handles) of nonblocking receives
+
+    ! Others
+    integer                                 :: proc_gap     ! distance between my (mpi) coordinate and the coordinate of the
+                                                            ! process associated to a given position
+    integer                                 :: ind_gap      ! loop index
+    integer                                 :: ind_1Dtable  ! index of my current position inside a one-dimensional table
+    ! Variables used to manage mpi communications
+    integer                                 :: com_size     ! size of the sent/received message
+    integer                                 :: tag          ! mpi message tag
+    integer                                 :: ierr         ! mpi error code
+
+    ! ===== Receive cartography =====
+    ! It is better to post receives before sending.
+    ind_1Dtable = 0
+    do proc_gap = rece_gap(1,1), rece_gap(1,2)
+        ind_1Dtable = ind_1Dtable + 1
+        if (neighbors(direction,proc_gap)/= D_rank(direction)) then
+            tag = compute_tag(ind_group, tag_bufToScal_range, direction, -proc_gap)
+            call mpi_Irecv(rece_carto(1,ind_1Dtable), max_size, MPI_INTEGER,  &
+                & neighbors(direction,proc_gap), tag, D_COMM(direction),      &
+                & r_request_ran(ind_1Dtable), ierr)
+        else
+            rece_carto(1,ind_1Dtable) = 0
+        end if
+    end do
+
+    ! ===== Complete cartography and send range about the particles I remesh =====
+    s_request_ran = MPI_REQUEST_NULL
+    min_size = 2 + gs(2)
+    proc_gap = send_gap_abs(1) - 1
+    do ind_gap = 1, nb_s ! = send_gap_abs(2)-send_gap_abs(1)+1
+        proc_gap = proc_gap + 1
+        !proc_gap = ind_gap+send_gap_abs(1)-1
+        call AC_remesh_cartography(direction, gs, min_size, proc_gap, ind_gap, &
+            & send_min, send_max, cartography, com_size)
+#ifdef PART_DEBUG
+            if(com_size>max_size) then
+                print*, 'cartography size = ', com_size ,' larger than the theoretical size ', &
+                    & max_size,' and cartography = ', cartography(:,ind_gap)
+            end if
+#endif
+        ! Tag = concatenation of (rank+1), ind_group(1), ind_group(2), direction and unique Id.
+        tag = compute_tag(ind_group, tag_bufToScal_range, direction, proc_gap)
+        ! Send message
+        if (neighbors(direction,proc_gap) /= D_rank(direction)) then
+            call mpi_ISsend(cartography(1,ind_gap), com_size, MPI_INTEGER,&
+                & neighbors(direction,proc_gap), tag, D_comm(direction),  &
+                & s_request_ran(ind_gap),ierr)
+        end if
+    end do
+
+    ! ===== Initialize the general buffer =====
+    ! The same buffer is used to send data to all the target processes. Its size
+    ! has to be computed, and it has to be split into one part per target process
+    ! => pos_in_buffer(i) = first (1D) index of the sub-array of send_buffer
+    ! associated to the i-th mpi process to which I will send remeshed particles.
+    pos_in_buffer(0) = 1
+    pos_in_buffer(1)   = 1
+    do ind_gap = 1, nb_s - 1 ! = send_gap_abs(2)-send_gap_abs(1)
+        pos_in_buffer(ind_gap+1)= pos_in_buffer(ind_gap) + cartography(1,ind_gap)
+    end do
+    ! While writing values into the send buffer during the remeshing, pos_in_buffer is updated.
+    ! As it has one extra element (the "0" one), after this process pos_in_buffer(i-1)
+    ! will be equal to the first (1D) index of the sub-array of send_buffer
+    ! associated to the i-th mpi process to which I will send remeshed particles.
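+    ! Illustration (hypothetical numbers): with nb_s = 3 and
+    ! cartography(1,1:3) = (/10, 0, 6/), the loop above yields
+    ! pos_in_buffer(0:3) = (/1, 1, 11, 11/): the part for the first target
+    ! spans send_buffer(1:10), the second target receives nothing, and the
+    ! third target's part starts at index 11.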
+
+end subroutine AC_remesh_init
+
+!> Perform all the work needed to compute the scalar values at t+dt from the buffer
+!! containing the remeshing of the local particles.
+!! @details
+!!     After having remeshed the particles of the local sub-domain into a
+!! buffer, it remains to send the buffer parts to the different processes according
+!! to the domain subdivision among the processes. Then, the local scalar field
+!! is updated from the received buffers.
+subroutine AC_remesh_finalize(direction, ind_group, gs, j, k, scal, send_gap_abs, rece_gap, &
+    & nb_r, nb_s, cartography, rece_carto, send_buffer, pos_in_buffer, min_size)
+
+    use cart_topology     ! Description of mesh and of mpi topology
+    use advec_variables         ! contains info about solver parameters and others.
+
+    ! Input/Output
+    integer, intent(in)                         :: direction
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: j, k
+    real(WP),dimension(:,:,:),intent(inout)     :: scal
+    integer, dimension(2), intent(in)           :: send_gap_abs ! min (resp. max) value of send_gap(:,:,i) for i=1 (resp. i=2)
+    integer, dimension(2, 2), intent(in)        :: rece_gap     ! range of the processes which will send me information during the remeshing
+    integer, intent(in)                         :: nb_r, nb_s   ! number of receptions/sends
+    integer, dimension(:,:), intent(in)         :: cartography  ! cartography(:,i) contains the set of line indices in the block for which the
+                                                                ! current process will send data to the i-th target during the remeshing and,
+                                                                ! for each of these lines, the range of mesh points involved
+    integer, dimension(:,:), intent(in)         :: rece_carto   ! same as above, but for what I receive
+    real(WP),dimension(:), intent(in)           :: send_buffer  ! buffer used to remesh the scalar before sending it to the right sub-domain;
+                                                                ! sorted by receiver and not by coordinate.
+    integer, dimension(0:nb_s), intent(inout)   :: pos_in_buffer! positions of the per-target sub-arrays inside the send buffer
+    integer, intent(in)                         :: min_size     ! tool to manage the buffer - first index in the cartography where the line indices along the first dimension of the group are stored
+
+    ! Other local variables
+    integer                                 :: proc_gap, gap! distance between my (mpi) coordinate and the coordinate of the
+                                                            ! process associated to a given position
+    integer                                 :: ind_gap      ! loop index
+    integer                                 :: ind_1Dtable  ! index of my current position inside a one-dimensional table
+    ! Variables used to update the scalar field from the buffers
+    real(WP),dimension(:),allocatable,target:: rece_buffer  ! buffer used to receive the scalar field from other processes.
+    integer, dimension(:), allocatable      :: rece_pos     ! cells from rece_pos(i) to rece_pos(i+1)-1 in rece_buffer
+                                                            ! are devoted to the process of relative position i
+    ! Variables used to manage mpi communications
+    integer, dimension(:), allocatable      :: s_request_sca! mpi communication requests (handles) of nonblocking sends
+    integer, dimension(:), allocatable      :: r_request_sca! mpi communication requests (handles) of nonblocking receives
+#ifndef BLOCKING_SEND
+    integer, dimension(:,:), allocatable    :: s_status     ! mpi communication statuses of nonblocking sends
+#endif
+    integer, dimension(mpi_status_size)     :: r_status     ! another mpi communication status
+    integer                                 :: tag          ! mpi message tag
+    integer                                 :: ierr         ! mpi error code
+    integer                                 :: missing_msg  ! number of remeshing buffers not yet received
+
+
+    ! ===== Receive buffers (initiate receives before sends) =====
+    ! -- Compute the size of the reception buffer and split it into parts corresponding to each sender --
+    allocate(rece_pos(rece_gap(1,1):rece_gap(1,2)+1))
+    rece_pos(rece_gap(1,1)) = 1
+    ind_gap = 0
+    do proc_gap = rece_gap(1,1), rece_gap(1,2)
+        ind_gap = ind_gap + 1
+        rece_pos(proc_gap+1)= rece_pos(proc_gap) + rece_carto(1,ind_gap)
+    end do
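+    ! Illustration (hypothetical numbers): with rece_gap(1,1:2) = (/-1, 1/) and
+    ! rece_carto(1,1:3) = (/4, 0, 7/), this yields rece_pos(-1:2) = (/1, 5, 5, 12/),
+    ! so the reception buffer allocated below holds 11 values in total.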
+    allocate(rece_buffer(rece_pos(rece_gap(1,2)+1)-1))
+    ! -- And initialize the reception --
+    allocate(r_request_sca(1:nb_r))
+    r_request_sca = MPI_REQUEST_NULL
+    ind_gap = 0
+    do proc_gap = rece_gap(1,1), rece_gap(1,2)
+        ind_gap = ind_gap + 1 ! = proc_gap - rece_gap(1,1)+1
+        if (neighbors(direction,proc_gap)/= D_rank(direction)) then
+            tag = compute_tag(ind_group, tag_bufToScal_buffer, direction, -proc_gap)
+            call mpi_Irecv(rece_buffer(rece_pos(proc_gap)), rece_carto(1,ind_gap),  &
+                & MPI_REAL_WP, neighbors(direction,proc_gap), tag,         &
+                & D_COMM(direction), r_request_sca(ind_gap), ierr)
+        end if
+    end do
+
+    ! ===== Send buffer =====
+    missing_msg = nb_r
+    allocate(s_request_sca(1:nb_s))
+    s_request_sca = MPI_REQUEST_NULL
+    proc_gap = send_gap_abs(1)-1
+    ! -- Send the buffer to the matching processes and update the scalar field --
+    do ind_gap = 1, nb_s
+        proc_gap = proc_gap +1
+        !proc_gap = ind_gap-1+send_gap_abs(1)
+        if (neighbors(direction,proc_gap)/=D_rank(direction)) then
+            ! Send buffer
+            tag = compute_tag(ind_group, tag_bufToScal_buffer, direction, ind_gap-1+send_gap_abs(1))
+#ifdef BLOCKING_SEND
+            call mpi_Send(send_buffer(pos_in_buffer(ind_gap-1)), cartography(1,ind_gap), MPI_REAL_WP, &
+                & neighbors(direction,proc_gap), tag, D_comm(direction), r_status, ierr)
+#else
+            call mpi_ISsend(send_buffer(pos_in_buffer(ind_gap-1)), cartography(1,ind_gap), MPI_REAL_WP, &
+                & neighbors(direction,proc_gap), tag, D_comm(direction), s_request_sca(ind_gap),ierr)
+#endif
+        else
+            ! Range I want - store into the cartography
+            !gap = -(ind_gap-1+send_gap_abs(1))*mesh_sc%N_proc(direction)
+            gap = -proc_gap*mesh_sc%N_proc(direction)
+            ! Update directly the scalar field
+            call remesh_buffer_to_scalar_pt(gs, j, k, ind_gap, gap, min_size, &
+                    & cartography, send_buffer, scal, pos_in_buffer(ind_gap-1))
+            missing_msg = missing_msg - 1
+        end if
+    end do
+
+    ! ===== Update scalar field =====
+    do while (missing_msg >= 1)
+        ! --- Pick one of the completed messages ---
+        ! more precisely: the last reception that completed (and was not yet freed); if no such
+        ! message is available, wait for the first reception to complete.
+        call mpi_waitany(nb_r, r_request_sca, ind_1Dtable, r_status, ierr)
+        ! -- Update the scalar field by using the cartography --
+        ! Range I want - store into the cartography
+        proc_gap = ind_1Dtable + rece_gap(1,1)-1
+        gap = proc_gap*mesh_sc%N_proc(direction)
+        call remesh_buffer_to_scalar_pt(gs, j, k, ind_1Dtable, gap, min_size, &
+                & rece_carto, rece_buffer, scal, rece_pos(proc_gap))
+        missing_msg = missing_msg - 1
+    end do
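+    ! Note (assuming the standard MPI Fortran bindings): mpi_waitany returns in
+    ! ind_1Dtable the 1-based index of the completed request inside r_request_sca,
+    ! so the sender's relative position is recovered above as
+    ! proc_gap = ind_1Dtable + rece_gap(1,1) - 1, matching the order in which the
+    ! receives were posted.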
+
+    ! ===== Free memory and communication buffers =====
+    ! -- Deallocate all fields --
+    deallocate(rece_pos)
+    deallocate(rece_buffer)
+    deallocate(r_request_sca)
+#ifndef BLOCKING_SEND
+    ! -- Check that the Isends are done --
+    allocate(s_status(MPI_STATUS_SIZE,1:nb_s))
+    call mpi_waitall(nb_s, s_request_sca, s_status, ierr)
+    deallocate(s_status)
+    ! -- Free all communication buffers and data --
+    deallocate(s_request_sca)
+#endif
+
+end subroutine AC_remesh_finalize
+
+
+end module advec_common_remesh
+!> @}
diff --git a/HySoP/src/scalesReduced/particles/advec_correction.f90 b/HySoP/src/scalesReduced/particles/advec_correction.f90
new file mode 100644
index 0000000000000000000000000000000000000000..4285ad935ce14ce4dff80ac9861215c6e6e8ac03
--- /dev/null
+++ b/HySoP/src/scalesReduced/particles/advec_correction.f90
@@ -0,0 +1,268 @@
+!USEFORTEST advec
+!> @addtogroup part
+!! @{
+!------------------------------------------------------------------------------
+!
+! MODULE: advec_correction
+!
+!
+! DESCRIPTION:
+!> The module ``advec_correction'' gathers the functions and subroutines used to
+!! compute the corrections or the limitator when required. These tools are
+!! independent of the direction.
+!! @details
+!! This module gathers functions and routines used to determine when corrections
+!! are required, depending on the remeshing formula. It includes particle
+!! type and tag computation (for corrected lambda schemes) and slope computation
+!! for the limitator.
+!!
+!! Except for testing purposes, this module is not supposed to be used by the
+!! main code but only by the other advection modules. More precisely, a final user
+!! must only use the generic "advec" module, which contains all the interfaces to
+!! solve the advection equation with the particle method and to choose the
+!! remeshing formula, the dimensional splitting and everything else.
+!!
+!! The module "test_advec" can be used in order to validate the procedures
+!! embedded in this module.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advec_correction
+
+  use mpi, only: MPI_STATUS_SIZE
+
+    implicit none
+    
+
+    !----- Determine block type and tag particles -----
+    public  :: AC_type_and_block_group
+    public  :: AC_limitator_from_slopes
+
+contains
+
+! ===========================================================================================================
+! ====================     Bloc type and particles tag for corrected lambda schemes      ====================
+! ===========================================================================================================
+
+!> Determine the type (center or left) of each block and tag the particles where
+!! corrected remeshing formulas are required, for a complete group of
+!! lines.
+!!    @param[in]        dt          = time step
+!!    @param[in]        dir         = current direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        gp_s        = size of a group (i.e. number of lines it gathers along the two other directions)
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        p_V         = particle velocity (along the current direction)
+!!    @param[out]       bl_type     = table of blocks type (center of left)
+!!    @param[out]       bl_tag      = information about tagged particles (bl_tag(ind_bl) is true if the end of the ind_bl-th block
+!!                                    and the beginning of the following one are tagged)
+!! @details
+!!        This subroutine works on a group of lines. For each line of this group, it
+!!    determines the type of each block of this line and where corrected remeshing
+!!    formulas are required. At those points, it tags the block transition (i.e. the end of
+!!    the current block and the beginning of the following one) in order to indicate
+!!    that corrected weights have to be used during the remeshing.
+subroutine AC_type_and_block_group(dt, dir, gp_s, ind_group, p_V, &
+                & bl_type, bl_tag)
+
+    
+    use precision_tools ! defines the working precision (double or single)
+    use cart_topology   ! info about mesh and mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    real(WP), intent(in)                      :: dt           ! time step
+    integer, intent(in)                       :: dir
+    integer, dimension(2),intent(in)          :: gp_s         ! group size
+    integer, dimension(2), intent(in)         :: ind_group    ! group index
+    real(WP), dimension(:,:,:), intent(in)    :: p_V
+    logical,dimension(:,:,:),intent(out)      :: bl_type      ! is the particle block a center block or a left one?
+    logical,dimension(:,:,:),intent(out)      :: bl_tag       ! indices of tagged particles
+
+    real(WP),dimension(bl_nb(dir)+1,gp_s(1),gp_s(2))            :: bl_lambdaMin ! for a particle, lambda = V*dt/dx; bl_lambdaMin = min of
+                                                                                ! lambda on a block (also takes into account the first following particle)
+    real(WP),dimension(gp_s(1),gp_s(2))                         :: lambP, lambN ! buffers to exchange some lambda minima with other processes
+    real(WP),dimension(gp_s(1),gp_s(2))                         :: lambB, lambE ! min value of lambda at the beginning and at the end of the line
+    integer, dimension(bl_nb(dir)+1,gp_s(1),gp_s(2))            :: bl_ind       ! block index: integer such that lambda is in (bl_ind,bl_ind+1) for a left block
+                                                                                ! and in (bl_ind-1/2, bl_ind+1/2) for a center block
+    integer                                                     :: ind,i_p      ! some indices
+    real(WP)                                                    :: cfl          ! = dt/dx
+    integer, dimension(2)                                       :: send_request ! mpi request handles of nonblocking sends
+    integer, dimension(2)                                       :: rece_request ! mpi request handles of nonblocking receives
+    integer, dimension(MPI_STATUS_SIZE)                         :: rece_status  ! mpi status (for mpi_wait)
+    integer, dimension(MPI_STATUS_SIZE)                         :: send_status  ! mpi status (for mpi_wait)
+    integer, dimension(2)                                       :: tag_table    ! tags for mpi messages
+    integer                                                     :: com_size     ! size of mpi messages
+    integer                                                     :: ierr         ! mpi error code
+
+    ! ===== Initialisation =====
+    cfl = dt/mesh_sc%dx(dir)
+    com_size = gp_s(1)*gp_s(2)
+
+    ! ===== Compute bl_lambdaMin =====
+
+    ! Receive ghost values, i.e. values from the neighbors' boundaries.
+    tag_table = compute_tag(ind_group, tag_part_tag_NP, dir)
+    call mpi_Irecv(lambN(1,1), com_size, MPI_REAL_WP, &
+            & neighbors(dir,1), tag_table(1), D_comm(dir), rece_request(1), ierr)
+    call mpi_Irecv(lambP(1,1), com_size, MPI_REAL_WP, &
+            &  neighbors(dir,-1), tag_table(2), D_comm(dir), rece_request(2), ierr)
+
+    ! -- For the first block (1/2) --
+    ! The domain contains only its second half => exchange ghosts with the previous process
+    lambB = minval(p_V(1:(bl_size/2)+1,:,:),1)*cfl
+    !tag_table = compute_tag(ind_group, tag_part_tag_NP, dir)   ! tag_table already equals this.
+    ! Send message
+    call mpi_ISsend(lambB(1,1), com_size, MPI_REAL_WP, &
+            & neighbors(dir,-1), tag_table(1), D_comm(dir), send_request(1), ierr)
+
+    ! -- For the last block (1/2) --
+    ! The process contains only its first half => exchange ghosts with the next process
+    ind = bl_nb(dir) + 1
+    lambE = minval(p_V(mesh_sc%N_proc(dir) - (bl_size/2)+1 :mesh_sc%N_proc(dir),:,:),1)*cfl
+    ! Send message
+    call mpi_ISsend(lambE(1,1), com_size, MPI_REAL_WP, &
+            & neighbors(dir,1), tag_table(2), D_comm(dir), send_request(2), ierr)
+
+    ! -- For the "middle" block --
+    do ind = 2, bl_nb(dir)
+        i_p = ((ind-1)*bl_size) + 1 - bl_size/2
+        bl_lambdaMin(ind,:,:) = minval(p_V(i_p:i_p+bl_size,:,:),1)*cfl
+    end do
+
+    ! -- For the first block (1/2) --
+    ! The domain contains only its second half => use the exchanged ghosts
+    ! Check reception
+    call mpi_wait(rece_request(2), rece_status, ierr)
+    bl_lambdaMin(1,:,:) = min(lambB(:,:), lambP(:,:))
+
+    ! -- For the last block (1/2) --
+    ! The process contains only its first half => use the exchanged ghosts
+    ! Check reception
+    call mpi_wait(rece_request(1), rece_status, ierr)
+    ind = bl_nb(dir) + 1
+    bl_lambdaMin(ind,:,:) = min(lambE(:,:), lambN(:,:))
+
+    ! ===== Compute block type and index =====
+    bl_ind = nint(bl_lambdaMin)
+    bl_type = (bl_lambdaMin<dble(bl_ind))
+
+    ! ===== Tag particles =====
+    do ind = 1, bl_nb(dir)
+        bl_tag(ind,:,:) = ((bl_ind(ind,:,:)/=bl_ind(ind+1,:,:)) .and. &
+                & (bl_type(ind,:,:).neqv.bl_type(ind+1,:,:)))
+    end do
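+    ! Illustration (hypothetical numbers): a block with bl_lambdaMin = 1.3 gets
+    ! bl_ind = nint(1.3) = 1 and bl_type = (1.3 < 1.0) = .false.; a block with
+    ! bl_lambdaMin = 0.8 gets bl_ind = 1 and bl_type = (0.8 < 1.0) = .true.
+    ! A transition between two consecutive blocks is tagged when both their
+    ! indices and their types differ, as computed in the loop above.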
+
+    call mpi_wait(send_request(1), send_status, ierr)
+    call mpi_wait(send_request(2), send_status, ierr)
+
+end subroutine AC_type_and_block_group
+
+
+!> Compute a limitator function from the scalar slopes - only for the corrected lambda 2 formula.
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y, 3 = along Z)
+!!    @param[in]        gp_s        = size of a group (i.e. number of lines it gathers along the two other directions)
+!!    @param[in]        p_pos       = particles position
+!!    @param[in]        deltaS      = slopes of the scalar advected by the particles: deltaS(:,:,i) = scalar(i+1)-scalar(i)
+!!    @param[out]       limit       = limitator function
+!!    @param[in]        tag_mpi     = tag for the mpi messages
+!!    @param[in]        com_size    = size of the mpi messages
+!! @details
+!!        This subroutine works on a group of lines. For each line of this group, it
+!!    computes the limitator function from the scalar slopes, exchanging the required
+!!    boundary values with the neighboring processes.
+!!         Note that the subroutine actually computes limitator/8, as this is the
+!!    expression which is used inside the remeshing formula, and computing it
+!!    directly minimizes the number of operations.
+subroutine AC_limitator_from_slopes(direction, gp_s, p_pos, &
+                & deltaS, limit, tag_mpi, com_size)
+
+    
+    use cart_topology   ! info about mesh and mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+    use precision_tools       ! defines the working precision (double or single)
+
+    integer, intent(in)                         :: direction    ! current direction
+    integer, dimension(2),intent(in)            :: gp_s         ! group size
+    real(WP), dimension(:,:,:), intent(in)      :: p_pos        ! particle positions
+    real(WP), dimension(:,:,:), intent(in)      :: deltaS       ! scalar slope: scalar(i+1)-scalar(i), for i = 1 to N_proc+1
+    real(WP), dimension(:,:,:), intent(out)     :: limit        ! limitator function
+    integer, intent(in)                         :: tag_mpi      ! tag for mpi messages
+    integer, intent(in)                         :: com_size     ! size of mpi messages
+
+    ! Local variables
+    real(WP),dimension(2,gp_s(1),gp_s(2))       :: Sbuffer, Rbuffer ! buffers to exchange the scalar or the limitator at the boundaries with neighbors.
+    integer                                     :: ind          ! loop index on the particle index
+    real(WP),dimension(gp_s(1),gp_s(2))         :: afl          ! = cfl - [cfl] where [] denotes the nearest integer.
+!   integer,dimension(gp_s(1),gp_s(2))          :: afl_sign     ! = sign of afl, ie 1 if afl>=0, -1 if afl<0
+    integer                                     :: send_request ! mpi request handle of the nonblocking send
+    integer, dimension(MPI_STATUS_SIZE)         :: rece_status  ! mpi status (for mpi_wait)
+    integer, dimension(MPI_STATUS_SIZE)         :: send_status  ! mpi status (for mpi_wait)
+    integer                                     :: ierr         ! mpi error code
+
+    ! ===== Compute slope and limitator =====
+    ! Van Leer limitator function (limit = limitator/8)
+    ! -- For the "middle" and the "last" block --
+    do ind = 2, mesh_sc%N_proc(direction)
+        where(deltaS(:,:,ind)/=0)
+            afl = p_pos(ind,:,:)
+            afl = afl - nint(afl)
+!           afl_sign = int(sign(1._WP,afl))
+!           limit(ind+1,:,:) = (4.0_WP/8._WP)*min(0.9_WP,(afl_sign*afl+0.5_WP)**2)*(deltaS(:,:,ind-afl_sign)/deltaS(:,:,ind))/(1+(deltaS(:,:,ind-afl_sign)/deltaS(:,:,ind)))
+            ! If (p_pos-nint(p_pos)) >= 0
+            where(afl>=0)
+                limit(ind+1,:,:) = max(0._WP,(deltaS(:,:,ind-1)/deltaS(:,:,ind)))
+                limit(ind+1,:,:) = limit(ind+1,:,:)/(limit(ind+1,:,:)+1)
+                limit(ind+1,:,:) = (4.0_WP/8._WP)*min(0.9_WP,(afl+0.5_WP)**2)*limit(ind+1,:,:)
+            elsewhere
+                limit(ind+1,:,:) = max(0._WP,(deltaS(:,:,ind+1)/deltaS(:,:,ind)))
+                limit(ind+1,:,:) = limit(ind+1,:,:)/(limit(ind+1,:,:)+1)
+                limit(ind+1,:,:) = (4.0_WP/8._WP)*min(0.9_WP,(afl-0.5_WP)**2)*limit(ind+1,:,:)
+            end where
+        elsewhere
+            limit(ind+1,:,:) = 0.0_WP
+        end where
+    end do
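+    ! Worked example (hypothetical values): for afl = 0.2 and a slope ratio
+    ! deltaS(:,:,ind-1)/deltaS(:,:,ind) = 1 (smooth data), the lines above give
+    ! limit = (4/8)*min(0.9,(0.2+0.5)**2) * (1/(1+1)) = 0.5*0.49*0.5 = 0.1225.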
+    ! -- For the "first" block --
+    ! 1 - limit(1) - limitator at 1/2 is already compute on the previous mpi-rank (limit(N_proc+1) !)
+    ! 2 - limit(2) - limitator at 1+1/2 requires deltaS(0) = scalar slope between scalar(0) and scalar(-1) which is already compute on previous rank
+    ! Send these values
+    Sbuffer(1,:,:) = limit(mesh_sc%N_proc(direction)+1,:,:)
+    Sbuffer(2,:,:) = deltaS(:,:,mesh_sc%N_proc(direction))
+    call mpi_ISsend(Sbuffer(1,1,1), com_size, MPI_REAL_WP, &
+            & neighbors(direction,1), tag_mpi, D_comm(direction), send_request, ierr)
+    ! Receive it !
+    call mpi_recv(Rbuffer(1,1,1), com_size, MPI_REAL_WP, &
+            &  neighbors(direction,-1), tag_mpi, D_comm(direction),rece_status, ierr)
+    ! Get limit(1) = limitator at 1/2
+    limit(1,:,:) = Rbuffer(1,:,:)
+    ! Get limit(2) = limitator at 1+1/2
+    where(deltaS(:,:,1)/=0)
+        afl = p_pos(1,:,:)
+        afl = afl - nint(afl)
+        ! If (p_pos-nint(p_pos)) >= 0
+        where(afl>=0)
+            limit(2,:,:) = max(0._WP,(Rbuffer(2,:,:)/deltaS(:,:,1)))
+            !            = ( deltaS(:,:,0)/deltaS(:,:,1))
+            limit(2,:,:) = limit(2,:,:)/(1+limit(2,:,:))
+            limit(2,:,:) = (4.0_WP/8._WP)*min(0.9_WP,(afl+0.5_WP)**2)*limit(2,:,:)
+        elsewhere
+            limit(2,:,:) = max(0._WP,(deltaS(:,:,2)/deltaS(:,:,1)))
+            limit(2,:,:) = limit(2,:,:)/(1+limit(2,:,:))
+            limit(2,:,:) = (4.0_WP/8._WP)*min(0.9_WP,(afl-0.5_WP)**2)*limit(2,:,:)
+        end where
+    elsewhere
+        limit(2,:,:) = 0.0_WP
+    end where
+
+    ! Classical (corrected) lambda formula: limitator function = 1
+    ! limit = 1._WP/8._WP
+
+
+    ! ===== Wait for the mpi_ISsend completion =====
+    call mpi_wait(send_request, send_status, ierr)
+
+end subroutine AC_limitator_from_slopes
+
+end module advec_correction
diff --git a/HySoP/src/scalesReduced/particles/advec_line/advecX_line.f90 b/HySoP/src/scalesReduced/particles/advec_line/advecX_line.f90
new file mode 100644
index 0000000000000000000000000000000000000000..e311ef87d49833d1537ca006984951892d56b3b5
--- /dev/null
+++ b/HySoP/src/scalesReduced/particles/advec_line/advecX_line.f90
@@ -0,0 +1,227 @@
+!USEFORTEST advec
+!> @addtogroup part
+
+!------------------------------------------------------------------------------
+!
+! MODULE: advecX_line
+!
+!
+! DESCRIPTION:
+!> The module advecX_line is devoted to the simplest implementation of
+!! the advection of a scalar field along the X axis.
+!
+!> @details
+!> The module advecX_line is devoted to the simplest implementation of
+!! the advection of a scalar field along the X axis. It is an unoptimized
+!! version, useful to understand the basics and to benchmark the
+!! optimisations done.
+!! It uses a particle method and provides a parallel implementation.
+!!
+!! This module can use the methods and variables defined in the module
+!! "advec_common_line", which gathers information and tools shared by the advection
+!! along the x, y and z axes.
+!!
+!! The module "test_advec" can be used in order to validate the procedures
+!! embedded in this module.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advecX_line
+
+    use precision_tools
+    use advec_abstract_proc
+
+    implicit none
+
+    ! ===== Public procedures =====
+    !> particles solver with remeshing method at order 2
+    public                  :: advecX_calc_line ! remeshing method at order 2
+    !----- (corrected) Remeshing method (these methods are set to public for validation purposes) -----
+    public                  :: Xremesh_O2_line  ! order 2
+
+    ! ===== Private procedures =====
+    ! Particles initialisation
+    private                 :: advecX_init_line ! initialisation for only one line of particles
+
+    ! ===== Private variables =====
+    ! particles solver with different remeshing formula
+    integer, dimension(2), private  :: gpX_size
+    !> Current direction = along X
+    integer, private, parameter     :: direction=1
+    !> Group size along current direction
+    !integer, private, dimension(2)  :: gs
+
+contains
+
+! #####################################################################################
+! #####                                                                           #####
+! #####                         Public procedure                                  #####
+! #####                                                                           #####
+! #####################################################################################
+
+! =================================================================
+! ====================         Solver          ====================
+! =================================================================
+
+!> Advection during a time step dt - order 2 - "line one by one" version
+!!    @param[in]        dt      = time step
+!!    @param[in]        Vx      = velocity along X (could be discretised on a bigger mesh than the scalar)
+!!    @param[in,out]    scal3D   = scalar field to advect
+subroutine advecX_calc_line(dt,Vx,scal3D)
+
+    use advec_common_line   ! Some procedures common to advection along all directions
+    use advec_variables     ! contains info about solver parameters and others.
+    use cart_topology       ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    real(WP), intent(in)                                                :: dt
+    real(WP), dimension(mesh_sc%N_proc(1), mesh_sc%N_proc(2), mesh_sc%N_proc(3)), intent(in)    :: Vx
+    real(WP), dimension(mesh_sc%N_proc(1), mesh_sc%N_proc(2), mesh_sc%N_proc(3)), intent(inout) :: scal3D
+    ! Other local variables
+    integer                             :: j,k          ! indices of the current mesh point
+    integer, dimension(2)               :: ind_group    ! index of the current group of lines (=(j,k) here)
+    real(WP), dimension(mesh_sc%N_proc(1))      :: p_pos_adim   ! adimensioned particle positions
+    real(WP), dimension(mesh_sc%N_proc(1))      :: p_V          ! particle velocities
+    logical, dimension(bl_nb(1)+1)      :: bl_type      ! is the particle block a center block or a left one?
+    logical, dimension(bl_nb(1))        :: bl_tag       ! true where a block transition is tagged
+
+    ind_group = 0
+    do k = 1, mesh_sc%N_proc(3)
+        ind_group(2) = ind_group(2) + 1
+        ind_group(1) = 0
+        do j = 1, mesh_sc%N_proc(2)
+            ind_group(1) = ind_group(1) + 1
+
+            ! ===== Init particles =====
+            call advecX_init_line(Vx, j, k, p_pos_adim, p_V)
+
+            ! ===== Advection =====
+            ! -- Compute velocity (with a RK2 scheme) --
+            call AC_particle_velocity_line(dt, direction, ind_group, p_pos_adim, p_V)
+            ! -- Advec particles --
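+            ! (positions are adimensioned, ie expressed in mesh-step units,
+            !  hence the division of the velocity by dx)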
+            p_pos_adim = p_pos_adim + dt*p_V/mesh_sc%dx(direction)
+
+            ! ===== Remeshing =====
+            ! -- Pre-Remeshing: Determine blocks type and tag particles --
+            call AC_type_and_block_line(dt, direction, ind_group, p_V, &
+                    & bl_type, bl_tag)
+            ! -- Remeshing --
+            call Xremesh_O2_line(ind_group, p_pos_adim, bl_type, bl_tag,j,k,scal3D)
+
+        end do
+    end do
+
+end subroutine advecX_calc_line
+
+
+! #####################################################################################
+! #####                                                                           #####
+! #####                         Private procedure                                 #####
+! #####                                                                           #####
+! #####################################################################################
+
+! ====================================================================
+! ====================   Remeshing subroutines    ====================
+! ====================================================================
+
+!> remeshing with an order 2 method, corrected to allow large CFL number - untagged particles
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        p_pos_adim  = adimensioned particle positions
+!!    @param[in]        bl_type     = false (resp. true) if the block is left (resp. centered)
+!!    @param[in]        bl_tag      = contains information about the block (is it tagged?)
+!!    @param[in]        j,k         = indices of the current line (y- and z-coordinates)
+!!    @param[in,out]    scal        = scalar field to advect
+subroutine Xremesh_O2_line(ind_group, p_pos_adim, bl_type, bl_tag,j,k,scal)
+
+    use advec_common_line       ! Some procedures common to advection along all directions
+    use advec_remeshing_line    ! Remeshing formula
+    use advec_variables         ! contains info about solver parameters and others.
+    use cart_topology     ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    integer, dimension(2), intent(in)                                   :: ind_group
+    integer, intent(in)                                                 :: j, k
+    logical, dimension(:), intent(in)                                   :: bl_type
+    logical, dimension(:), intent(in)                                   :: bl_tag
+    real(WP), dimension(:), intent(in)                                  :: p_pos_adim
+    real(WP), dimension(mesh_sc%N_proc(1), mesh_sc%N_proc(2), mesh_sc%N_proc(3)), intent(inout) :: scal
+    ! Other local variables
+    ! Variable used to remesh particles in a buffer
+    real(WP),dimension(:),allocatable   :: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain
+    integer, dimension(2)               :: rece_proc    ! minimal and maximal gap between my coordinate (along the current direction)
+                                                        ! and the ones from which I will receive data
+    integer                             :: proc_min     ! smallest gap between me and the processes to which I send data
+    integer                             :: proc_max     ! largest gap between me and the processes to which I send data
+
+
+    !  -- Compute ranges --
+    if (bl_type(1)) then
+        ! First particle is a centered one
+        send_j_min = nint(p_pos_adim(1))-1
+    else
+        ! First particle is a left one
+        send_j_min = floor(p_pos_adim(1))-1
+    end if
+    if (bl_type(mesh_sc%N_proc(direction)/bl_size +1)) then
+        ! Last particle is a centered one
+        send_j_max = nint(p_pos_adim(mesh_sc%N_proc(direction)))+1
+    else
+        ! Last particle is a left one
+        send_j_max = floor(p_pos_adim(mesh_sc%N_proc(direction)))+1
+    end if
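+    ! (the -1/+1 extension of the range reflects the fact that the order-2
+    !  lambda remeshing spreads each particle over three consecutive grid points)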
+
+    ! -- Determine the communications needed: who will communicate with whom? (ie compute senders and receivers) --
+    call AC_obtain_senders_line(send_j_min, send_j_max, direction, ind_group, proc_min, proc_max, rece_proc)
+
+    ! -- Allocate buffer for remeshing of local particles --
+    allocate(send_buffer(send_j_min:send_j_max))
+    send_buffer = 0.0_WP
+
+    ! -- Remesh the particles in the buffer --
+    call AC_remesh_lambda2corrected_basic(direction, p_pos_adim, scal(:,j,k), bl_type, bl_tag, send_j_min, send_j_max, send_buffer)
+
+    ! -- Send the buffer to the matching processes and update the scalar field --
+    scal(:,j,k) = 0
+    call AC_bufferToScalar_line(direction, ind_group , send_j_min, send_j_max, proc_min, proc_max, &
+        & rece_proc, send_buffer, scal(:,j,k))
+
+    ! -- Deallocate the buffer --
+    deallocate(send_buffer)
+
+end subroutine Xremesh_O2_line
+
+
+! ====================================================================
+! ====================    Initialize particle     ====================
+! ====================================================================
+
+!> Creation and initialisation of a line of particles (ie the Y and Z coordinates are fixed)
+!!    @param[in]    Vx          = 3D velocity field
+!!    @param[in]    j           = Y-indice of the current line
+!!    @param[in]    k           = Z-indice of the current line
+!!    @param[out]   p_pos_adim  = adimensioned particle positions
+!!    @param[out]   p_V         = particle velocities
+subroutine advecX_init_line(Vx, j, k, p_pos_adim, p_V)
+
+    use cart_topology   ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    integer, intent(in)                                 :: j,k
+    real(WP), dimension(mesh_sc%N_proc(direction)), intent(out) :: p_pos_adim, p_V
+    real(WP), dimension(:,:,:), intent(in)              :: Vx
+    ! Other local variables
+    integer                                     :: ind          ! index
+
+    do ind = 1, mesh_sc%N_proc(direction)
+        p_pos_adim(ind) = ind
+        p_V(ind)        = Vx(ind,j,k)
+    end do
+
+end subroutine advecX_init_line
+
+end module advecX_line
+!> @}
diff --git a/HySoP/src/scalesReduced/particles/advec_line/advecY_line.f90 b/HySoP/src/scalesReduced/particles/advec_line/advecY_line.f90
new file mode 100644
index 0000000000000000000000000000000000000000..2e7d1c4f0f099c420f671eded1ad69dc6018b1b8
--- /dev/null
+++ b/HySoP/src/scalesReduced/particles/advec_line/advecY_line.f90
@@ -0,0 +1,222 @@
+!USEFORTEST advec
+!> @addtogroup part
+!! @{
+
+!------------------------------------------------------------------------------
+!
+! MODULE: advecY_line
+!
+!
+! DESCRIPTION:
+!> The module advecY_line is devoted to the simplest implementation of
+!! advection of a scalar field along the Y axis.
+!
+!> @details
+!> The module advecY_line is devoted to the simplest implementation of
+!! advection of a scalar field along the Y axis. It is an unoptimized
+!! version, useful to understand the basics and to benchmark the
+!! optimisations done elsewhere.
+!! It uses a particle method and provides a parallel implementation.
+!!
+!! This module can use the methods and variables defined in the module
+!! "advec_common_line", which gathers information and tools shared for
+!! advection along the x, y and z-axis.
+!!
+!! The module "test_advec" can be used in order to validate the procedures
+!! embedded in this module.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advecY_line
+
+    use precision_tools
+    use advec_abstract_proc
+
+    implicit none
+
+    ! ===== Public procedures =====
+    !> particles solver with remeshing method at order 2
+    public                  :: advecY_calc_line     ! remeshing method at order 2
+    !----- (corrected) Remeshing method (these methods are set to public for validation purposes) -----
+    public                  :: Yremesh_O2_line       ! order 2
+
+    ! ===== Private procedures =====
+    ! Particles initialisation
+    private                 :: advecY_init_line ! initialisation for only one line of particles
+
+    ! ===== Private variables =====
+    !> current direction = along Y (to avoid redefinition and to ease cut/paste)
+    integer, parameter, private      :: direction = 2
+    !> Group size along current direction
+!    integer, private, dimension(2)  :: gs
+
+contains
+
+
+! #####################################################################################
+! #####                                                                           #####
+! #####                         Public procedure                                  #####
+! #####                                                                           #####
+! #####################################################################################
+
+
+!> Advection along Y during a time step dt - order 2 - "line one by one" version
+!!    @param[in]        dt      = time step
+!!    @param[in]        Vy      = velocity along Y (could be discretised on a bigger mesh than the scalar)
+!!    @param[in,out]    scal3D   = scalar field to advect
+subroutine advecY_calc_line(dt,Vy,scal3D)
+
+    use advec_common_line          ! some procedures common to advection along all directions
+    use advec_variables       ! contains info about solver parameters and others.
+    use cart_topology   ! description of mesh and of mpi topology
+
+    ! input/output
+    real(WP), intent(in)                                                :: dt
+    real(WP), dimension(mesh_sc%N_proc(1), mesh_sc%N_proc(2), mesh_sc%N_proc(3)), intent(in)    :: Vy
+    real(WP), dimension(mesh_sc%N_proc(1), mesh_sc%N_proc(2), mesh_sc%N_proc(3)), intent(inout) :: scal3D
+    ! other local variables
+    integer                                 :: i,k          ! indices of the current mesh point
+    integer, dimension(2)                   :: ind_group    ! index of the current group of lines ((i,k) here)
+    real(WP), dimension(mesh_sc%N_proc(direction))  :: p_pos_adim   ! adimensioned particle positions
+    real(WP), dimension(mesh_sc%N_proc(direction))  :: p_V          ! particle velocities
+    logical, dimension(bl_nb(direction)+1)  :: bl_type      ! is the particle block a center block or a left one?
+    logical, dimension(bl_nb(direction))    :: bl_tag       ! true where a block transition is tagged
+
+    ind_group = 0
+    do k = 1, mesh_sc%N_proc(3)
+        ind_group(2) = ind_group(2) + 1
+        ind_group(1) = 0
+        do i = 1, mesh_sc%N_proc(1)
+            ind_group(1) = ind_group(1) + 1
+
+            ! ===== Init particles =====
+            call advecY_init_line(Vy, i, k, p_pos_adim, p_V)
+
+            ! ===== Advection =====
+            ! -- Compute velocity (with a RK2 scheme) --
+            call AC_particle_velocity_line(dt, direction, ind_group, p_pos_adim, p_V)
+            ! -- Advec particles --
+            p_pos_adim = p_pos_adim + dt*p_V/mesh_sc%dx(direction)
+
+            ! ===== Remeshing =====
+            ! -- Pre-Remeshing: Determine blocks type and tag particles --
+            call AC_type_and_block_line(dt, direction, ind_group, p_V, bl_type, bl_tag)
+            ! -- Remeshing --
+            call Yremesh_O2_line(ind_group, p_pos_adim, bl_type, bl_tag,i,k,scal3D)
+        end do
+    end do
+
+
+end subroutine advecY_calc_line
+
+
+! #####################################################################################
+! #####                                                                           #####
+! #####                         Private procedure                                 #####
+! #####                                                                           #####
+! #####################################################################################
+
+! ====================================================================
+! ====================   Remeshing subroutines    ====================
+! ====================================================================
+
+!> remeshing with an order 2 method, corrected to allow large CFL number - untagged particles
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        p_pos_adim  = adimensioned particle positions
+!!    @param[in]        bl_type     = false (resp. true) if the block is left (resp. centered)
+!!    @param[in]        bl_tag      = contains information about the block (is it tagged?)
+!!    @param[in]        i,k         = indices of the current line (x- and z-coordinates)
+!!    @param[in,out]    scal        = scalar field to advect
+subroutine Yremesh_O2_line(ind_group, p_pos_adim, bl_type, bl_tag,i,k,scal)
+
+    use advec_common_line            ! Some procedures common to advection along all directions
+    use advec_remeshing_line    ! Remeshing formula
+    use advec_variables         ! contains info about solver parameters and others.
+    use cart_topology     ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    integer, dimension(2), intent(in)                                   :: ind_group
+    integer, intent(in)                                                 :: i, k
+    logical, dimension(:), intent(in)                                   :: bl_type
+    logical, dimension(:), intent(in)                                   :: bl_tag
+    real(WP), dimension(:), intent(in)                                  :: p_pos_adim
+    real(WP), dimension(mesh_sc%N_proc(1), mesh_sc%N_proc(2), mesh_sc%N_proc(3)), intent(inout) :: scal
+    ! Other local variables 
+    ! Variable used to remesh particles in a buffer
+    real(WP),dimension(:),allocatable   :: send_buffer  ! buffer used to remesh the scalar before sending it
+                                                        ! to the right subdomain
+    integer, dimension(2)               :: rece_proc    ! minimal and maximal gap between my Y-coordinate and the ones from which
+                                                        ! I will receive data
+    integer                             :: proc_min     ! smallest gap between me and the processes to which I send data
+    integer                             :: proc_max     ! largest gap between me and the processes to which I send data
+
+    !  -- Compute ranges for remeshing of local particles --
+    if (bl_type(1)) then
+        ! First particle is a centered one
+        send_j_min = nint(p_pos_adim(1))-1
+    else
+        ! First particle is a left one
+        send_j_min = floor(p_pos_adim(1))-1
+    end if
+    if (bl_type(mesh_sc%N_proc(direction)/bl_size +1)) then
+        ! Last particle is a centered one
+        send_j_max = nint(p_pos_adim(mesh_sc%N_proc(direction)))+1
+    else
+        ! Last particle is a left one
+        send_j_max = floor(p_pos_adim(mesh_sc%N_proc(direction)))+1
+    end if
+        
+    ! -- Determine the communications needed: who will communicate with whom? (ie compute senders and receivers) --
+    call AC_obtain_senders_line(send_j_min, send_j_max, direction, ind_group, proc_min, proc_max, rece_proc)
+
+    ! -- Allocate buffer for remeshing of local particles --
+    allocate(send_buffer(send_j_min:send_j_max))
+    send_buffer = 0.0_WP
+
+    ! -- Remesh the particles in the buffer --
+    call AC_remesh_lambda2corrected_basic(direction, p_pos_adim, scal(i,:,k), bl_type, bl_tag, send_j_min, send_j_max, send_buffer)
+    
+    ! -- Send the buffer to the matching processus and update the scalar field --
+    scal(i,:,k) = 0
+    call AC_bufferToScalar_line(direction, ind_group , send_j_min, send_j_max, proc_min, proc_max, &
+        & rece_proc, send_buffer, scal(i,:,k))
+
+    ! -- Deallocate the buffer --
+    deallocate(send_buffer)
+
+end subroutine Yremesh_O2_line
+
+
+! ====================================================================
+! ====================    Initialize particle     ====================
+! ====================================================================
+
+!> Creation and initialisation of a line of particles (ie the X and Z coordinates are fixed)
+!!    @param[in]    Vy          = 3D velocity field
+!!    @param[in]    i           = X-indice of the current line
+!!    @param[in]    k           = Z-indice of the current line
+!!    @param[out]   p_pos_adim  = adimensioned particle positions
+!!    @param[out]   p_V         = particle velocities
+subroutine advecY_init_line(Vy, i, k, p_pos_adim, p_V)
+
+    use cart_topology   ! description of mesh and of mpi topology
+
+    ! input/output
+    integer, intent(in)                                 :: i,k
+    real(WP), dimension(mesh_sc%N_proc(direction)), intent(out) :: p_pos_adim, p_V
+    real(WP), dimension(:,:,:), intent(in)              :: Vy
+    ! Other local variables
+    integer                                     :: ind          ! index
+
+    do ind = 1, mesh_sc%N_proc(direction)
+        p_pos_adim(ind) = ind
+        p_V(ind)        = Vy(i,ind,k)
+    end do
+
+end subroutine advecY_init_line
+
+end module advecY_line
+!> @}
diff --git a/HySoP/src/scalesReduced/particles/advec_line/advecZ_line.f90 b/HySoP/src/scalesReduced/particles/advec_line/advecZ_line.f90
new file mode 100644
index 0000000000000000000000000000000000000000..3a5e0a01481e0616ec70aad5c4037215536a311c
--- /dev/null
+++ b/HySoP/src/scalesReduced/particles/advec_line/advecZ_line.f90
@@ -0,0 +1,221 @@
+!USEFORTEST advec
+!> @addtogroup part
+!! @{
+
+!------------------------------------------------------------------------------
+!
+! MODULE: advecZ_line
+!
+!
+! DESCRIPTION: 
+!> The module advecZ_line is devoted to the simplest implementation of
+!! advection of a scalar field along the Z axis.
+!
+!> @details
+!> The module advecZ_line is devoted to the simplest implementation of
+!! advection of a scalar field along the Z axis. It is an unoptimized
+!! version, useful to understand the basics and to benchmark the
+!! optimisations done elsewhere.
+!! It uses a particle method and provides a parallel implementation.
+!!
+!! This module can use the methods and variables defined in the module
+!! "advec_common_line", which gathers information and tools shared for
+!! advection along the x, y and z-axis.
+!!
+!! The module "test_advec" can be used in order to validate the procedures
+!! embedded in this module.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advecZ_line
+
+    use precision_tools
+    use advec_abstract_proc
+
+    implicit none
+
+    ! ===== Public procedures =====
+    ! particles solver with different remeshing formula
+    public                  :: advecZ_calc_line ! remeshing method at order 2
+    !----- (corrected) Remeshing method (these methods are set to public for validation purposes) -----
+    public                  :: Zremesh_O2_line  ! order 2
+
+    ! ===== Private procedures =====
+    ! Particles initialisation
+    private                 :: advecZ_init_line ! initialisation for only one line of particles
+
+    ! ===== Private variables ====
+    !> Current direction = 3 ie along Z
+    integer, parameter, private     :: direction = 3
+    !> Group size along current direction
+    !integer, private, dimension(2)  :: gs
+
+contains
+
+! #####################################################################################
+! #####                                                                           #####
+! #####                         Public procedure                                  #####
+! #####                                                                           #####
+! #####################################################################################
+
+!> Advection along Z during a time step dt - order 2 - "line one by one" version
+!!    @param[in]        dt      = time step
+!!    @param[in]        Vz      = velocity along Z (could be discretised on a bigger mesh than the scalar)
+!!    @param[in,out]    scal3D   = scalar field to advect
+subroutine advecZ_calc_line(dt,Vz,scal3D)
+
+    use advec_common_line    ! some procedures common to advection along all directions
+    use advec_variables ! contains info about solver parameters and others.
+    use cart_topology   ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    real(WP), intent(in)                                                :: dt
+    real(WP), dimension(mesh_sc%N_proc(1), mesh_sc%N_proc(2), mesh_sc%N_proc(3)), intent(in)    :: Vz
+    real(WP), dimension(mesh_sc%N_proc(1), mesh_sc%N_proc(2), mesh_sc%N_proc(3)), intent(inout) :: scal3D
+    ! Other local variables
+    integer                                 :: i,j          ! indices of the current mesh point
+    integer, dimension(2)                   :: ind_group    ! index of the current group of lines (=(i,j) here)
+    real(WP), dimension(mesh_sc%N_proc(direction))  :: p_pos_adim   ! adimensioned particle positions
+    real(WP), dimension(mesh_sc%N_proc(direction))  :: p_V          ! particle velocities
+    logical, dimension(bl_nb(direction)+1)  :: bl_type      ! is the particle block a center block or a left one?
+    logical, dimension(bl_nb(direction))    :: bl_tag       ! true where a block transition is tagged
+
+    ind_group = 0
+    do j = 1, mesh_sc%N_proc(2)
+        ind_group(2) = ind_group(2) + 1
+        ind_group(1) = 0
+        do i = 1, mesh_sc%N_proc(1)
+            ind_group(1) = ind_group(1) + 1
+
+            ! ===== Init particles =====
+            call advecZ_init_line(Vz, i, j, p_pos_adim, p_V)
+
+            ! ===== Advection =====
+            ! -- Compute velocity (with a RK2 scheme) --
+            call AC_particle_velocity_line(dt, direction, ind_group, p_pos_adim, p_V)
+            ! -- Advec particles --
+            p_pos_adim = p_pos_adim + dt*p_V/mesh_sc%dx(direction)
+
+            ! ===== Remeshing =====
+            ! -- Pre-Remeshing: Determine blocks type and tag particles --
+            call AC_type_and_block_line(dt, direction, ind_group, p_V, bl_type, bl_tag)
+            ! -- Remeshing --
+            call Zremesh_O2_line(ind_group, p_pos_adim, bl_type, bl_tag,i,j,scal3D)
+
+        end do
+    end do
+
+end subroutine advecZ_calc_line
+
+
+! #####################################################################################
+! #####                                                                           #####
+! #####                         Private procedure                                 #####
+! #####                                                                           #####
+! #####################################################################################
+
+! ====================================================================
+! ====================   Remeshing subroutines    ====================
+! ====================================================================
+
+!> remeshing with an order 2 method, corrected to allow large CFL number - untagged particles
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        p_pos_adim  = adimensioned particle positions
+!!    @param[in]        bl_type     = false (resp. true) if the block is left (resp. centered)
+!!    @param[in]        bl_tag      = contains information about the block (is it tagged?)
+!!    @param[in]        i,j         = indices of the current line (x- and y-coordinates)
+!!    @param[in,out]    scal        = scalar field to advect
+subroutine Zremesh_O2_line(ind_group, p_pos_adim, bl_type, bl_tag,i,j,scal)
+
+    use advec_common_line            ! Some procedures common to advection along all directions
+    use advec_remeshing_line ! Remeshing formula
+    use advec_variables         ! contains info about solver parameters and others.
+    use cart_topology     ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    integer, dimension(2), intent(in)                                   :: ind_group
+    integer, intent(in)                                                 :: i, j
+    logical, dimension(:), intent(in)                                   :: bl_type
+    logical, dimension(:), intent(in)                                   :: bl_tag
+    real(WP), dimension(:), intent(in)                                  :: p_pos_adim
+    real(WP), dimension(mesh_sc%N_proc(1), mesh_sc%N_proc(2), mesh_sc%N_proc(3)), intent(inout) :: scal
+    ! Other local variables 
+    ! Variable used to remesh particles in a buffer
+    real(WP),dimension(:),allocatable   :: send_buffer  ! buffer used to remesh the scalar before sending it
+                                                        ! to the right subdomain
+    integer, dimension(2)               :: rece_proc    ! minimal and maximal gap between my Z-coordinate and the ones from which
+                                                        ! I will receive data
+    integer                             :: proc_min     ! smallest gap between me and the processes to which I send data
+    integer                             :: proc_max     ! largest gap between me and the processes to which I send data
+    
+
+    !  -- Compute ranges for remeshing of local particles --
+    if (bl_type(1)) then
+        ! First particle is a centered one
+        send_j_min = nint(p_pos_adim(1))-1
+    else
+        ! First particle is a left one
+        send_j_min = floor(p_pos_adim(1))-1
+    end if
+    if (bl_type(mesh_sc%N_proc(direction)/bl_size +1)) then
+        ! Last particle is a centered one
+        send_j_max = nint(p_pos_adim(mesh_sc%N_proc(direction)))+1
+    else
+        ! Last particle is a left one
+        send_j_max = floor(p_pos_adim(mesh_sc%N_proc(direction)))+1
+    end if
+        
+    ! -- Determine the communications needed: who will communicate with whom? (ie compute senders and receivers) --
+    call AC_obtain_senders_line(send_j_min, send_j_max, direction, ind_group, proc_min, proc_max, rece_proc)
+        
+    !  -- Allocate and initialize the buffer --
+    allocate(send_buffer(send_j_min:send_j_max))
+    send_buffer = 0.0_WP
+
+    ! -- Remesh the particles in the buffer --
+    call AC_remesh_lambda2corrected_basic(direction, p_pos_adim, scal(i,j,:), bl_type, bl_tag, send_j_min, send_j_max, send_buffer)
+    
+    ! -- Send the buffer to the matching processus and update the scalar field --
+    scal(i,j,:) = 0
+    call AC_bufferToScalar_line(direction, ind_group , send_j_min, send_j_max, proc_min, proc_max, &
+        & rece_proc, send_buffer, scal(i,j,:))
+
+    ! -- Deallocate the buffer --
+    deallocate(send_buffer)
+
+end subroutine Zremesh_O2_line
+
+
+! ====================================================================
+! ====================    Initialize particle     ====================
+! ====================================================================
+
+!> Creation and initialisation of a line of particles (ie the X and Y coordinates are fixed)
+!!    @param[in]    Vz          = 3D velocity field
+!!    @param[in]    i           = X-indice of the current line
+!!    @param[in]    j           = Y-indice of the current line
+!!    @param[out]   p_pos_adim  = adimensioned particle positions
+!!    @param[out]   p_V         = particle velocities
+subroutine advecZ_init_line(Vz, i, j, p_pos_adim, p_V)
+
+    use cart_topology   ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    integer, intent(in)                                 :: i,j
+    real(WP), dimension(mesh_sc%N_proc(direction)), intent(out) :: p_pos_adim, p_V
+    real(WP), dimension(:,:,:), intent(in)              :: Vz
+    ! Other local variables
+    integer                                             :: ind  ! index
+
+    do ind = 1, mesh_sc%N_proc(direction)
+        p_pos_adim(ind) = ind
+        p_V(ind)        = Vz(i,j,ind)
+    end do
+
+end subroutine advecZ_init_line
+
+end module advecZ_line
+!> @}
diff --git a/HySoP/src/scalesReduced/particles/advec_line/advec_common_line.f90 b/HySoP/src/scalesReduced/particles/advec_line/advec_common_line.f90
new file mode 100644
index 0000000000000000000000000000000000000000..dce066f350de432437d851ab557c74a1b17860de
--- /dev/null
+++ b/HySoP/src/scalesReduced/particles/advec_line/advec_common_line.f90
@@ -0,0 +1,808 @@
+!USEFORTEST advec
+!> @addtogroup part
+!! @{
+!------------------------------------------------------------------------------
+!
+! MODULE: advec_common_line
+!
+!
+! DESCRIPTION:
+!> The module ``advec_common_line'' gathers the functions and subroutines used to
+!! advect a scalar field which are not specific to a direction. It contains some ``old''
+!! functions from ``advec_common'' which are not optimized.
+!! @details
+!! This module gathers functions and routines used to advect a scalar field which are not
+!! specific to a direction. More precisely, it provides functions similar to those of
+!! ``advec_common'' but which work on a single line rather than on a
+!! group of lines. Considering how mpi parallelism works, working on single
+!! lines is not optimal. Therefore, these functions are only here for
+!! debugging and testing purposes. They can also be used to measure
+!! speed-up. They are simpler and more basic, but less efficient.
+!!
+!!      This module is automatically loaded when advec_common is used.
+!! Moreover, advec_common contains all the interfaces needed to automatically use
+!! the right function whether you work on a single line or on a group of
+!! lines.
+!! Except for testing purposes, this module is not supposed to be used by the
+!! main code but only by the other advection modules. More precisely, a final user
+!! must only use the generic "advec" module, which contains all the interfaces needed
+!! to solve the advection equation with the particle method and to choose the
+!! remeshing formula, the dimensional splitting and everything else.
+!!
+!! The module "test_advec" can be used in order to validate the procedures
+!! embedded in this module.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advec_common_line
+
+    use precision_tools
+    use mpi, only: MPI_INTEGER, MPI_STATUS_SIZE, MPI_ANY_SOURCE
+    implicit none
+
+! ===== Public procedures =====
+
+!----- To interpolate velocity -----
+public                              :: AC_obtain_receivers_line
+public                              :: AC_particle_velocity_line
+!----- Determine block type and tag particles -----
+public                              :: AC_type_and_block_line
+!----- To remesh particles -----
+public                              :: AC_obtain_senders_line
+public                              :: AC_bufferToScalar_line
+
+contains
+
+! ===== Public procedure =====
+
+
+! ==================================================================================
+! ====================     Compute particle velocity (RK2)      ====================
+! ==================================================================================
+
+!> Determine the set of processes which will send me information during the velocity interpolation.
+!!    @param[in]    direction       = current direction (1 = along X, 2 = along Y, 3 = along Z)
+!!    @param[in]    ind_group       = coordinate of the current group of lines
+!!    @param[in]    rece_ind_min    = minimal index of the mesh points involved in the velocity interpolation (on my local subdomain)
+!!    @param[in]    rece_ind_max    = maximal index of the mesh points involved in the velocity interpolation (on my local subdomain)
+!!    @param[out]   send_gap        = gap between my coordinate and the processes of minimal and maximal coordinate to which I will send information
+!!    @param[out]   rece_gap        = gap between my coordinate and the processes of minimal and maximal coordinate from which I will receive information
+!! @details
+!!    Obtain the list of processes which need a part of my local velocity field
+!!    to interpolate the velocity used in the RK2 scheme to advect their particles.
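+!! @remark Gaps are expressed in numbers of processes along the current
+!! direction; the matching ranks are recovered with mpi_cart_shift, eg:
+!!    call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rankN, ierr)
+!! returns in rankN the rank of the process located proc_gap positions after
+!! mine (with periodic wrap-around).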
+subroutine AC_obtain_receivers_line(direction, ind_group, rece_ind_min, rece_ind_max, send_gap, rece_gap)
+! XXX Works only for periodic boundary conditions.
+
+    use cart_topology   ! info about mesh and mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+    
+
+
+    ! Input/Output
+    integer, intent(in)                 :: rece_ind_min, rece_ind_max
+    integer, intent(in)                 :: direction
+    integer, dimension(2), intent(in)   :: ind_group
+    integer, dimension(2), intent(out)  :: rece_gap, send_gap
+    ! Others
+    integer, dimension(MPI_STATUS_SIZE) :: statut           ! mpi status (for mpi_recv and mpi_wait)
+    integer                             :: proc_gap         ! gap between a process coordinate (along the current
+                                                            ! direction) in the mpi-topology and my coordinate
+    integer                             :: rece_gapP        ! gap between the coordinate of the previous process (in the current direction)
+                                                            ! and the processes of maximal coordinate which will receive information from it
+    integer                             :: rece_gapN        ! same as above but for the next process
+    integer                             :: rankP, rankN     ! process ranks for shift (P = previous, N = next)
+    integer                             :: tag_min, tag_max ! mpi message tags (to communicate rece_proc(1) and rece_proc(2))
+    integer                             :: send_request     ! mpi communication request (handle) of nonblocking send
+    integer                             :: send_request_bis ! mpi communication request (handle) of nonblocking send
+    integer                             :: ierr             ! mpi error code
+    integer, dimension(2)               :: tag_table        ! some mpi message tag
+    logical, dimension(:,:), allocatable:: test_request
+    integer, dimension(:,:), allocatable:: s_request
+
+    tag_min = 5
+    tag_max = 6
+
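+    ! 3*N is used as a sentinel value: it cannot be a valid gap, so it marks
+    ! the entries of send_gap which remain to be received further below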
+    send_gap = 3*mesh_sc%N(direction)
+
+    rece_gap(1) = floor(real(rece_ind_min-1, WP)/mesh_sc%N_proc(direction))
+    rece_gap(2) = floor(real(rece_ind_max-1, WP)/mesh_sc%N_proc(direction))
+
+    ! ===== Communicate with my neighbors -> obtain ghosts =====
+    ! Compute their ranks
+    call mpi_cart_shift(D_comm(direction), 0, 1, rankP, rankN, ierr)
+    ! Inform my neighbors about the processes from which I need information
+    tag_table = compute_tag(ind_group, tag_obtrec_ghost_NP, direction)
+    call mpi_Isend(rece_gap(1), 1, MPI_INTEGER, rankP, tag_table(1), D_comm(direction), send_request, ierr)
+    call mpi_Isend(rece_gap(2), 1, MPI_INTEGER, rankN, tag_table(2), D_comm(direction), send_request_bis, ierr)
+    ! Receive the same message from my neighbors
+    call mpi_recv(rece_gapN, 1, MPI_INTEGER, rankN, tag_table(1), D_comm(direction), statut, ierr)
+    call mpi_recv(rece_gapP, 1, MPI_INTEGER, rankP, tag_table(2), D_comm(direction), statut, ierr)
+
+
+    ! ===== Send information if I am first or last =====
+    allocate(s_request(rece_gap(1):rece_gap(2),2))
+    allocate(test_request(rece_gap(1):rece_gap(2),2))
+    test_request = .false.
+    tag_table = compute_tag(ind_group, tag_obtrec_NP, direction)
+    do proc_gap = rece_gap(1), rece_gap(2)
+        ! Compute the rank of the target processus
+        call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rankN, ierr)
+        ! Determine if I am the first or the last process (considering the current direction)
+        ! to require information from this process
+        if (proc_gap>rece_gapP-1) then
+            if(rankN /= D_rank(direction)) then
+                call mpi_Isend(-proc_gap, 1, MPI_INTEGER, rankN, tag_table(1), D_comm(direction), s_request(proc_gap,1), ierr)
+                test_request(proc_gap,1) = .true.
+            else
+                send_gap(1) = -proc_gap
+            end if
+        end if
+        if (proc_gap<rece_gapN+1) then
+            if(rankN /= D_rank(direction)) then
+                test_request(proc_gap,2) = .true.
+                call mpi_Isend(-proc_gap, 1, MPI_INTEGER, rankN, tag_table(2), D_comm(direction), s_request(proc_gap,2), ierr)
+            else
+                send_gap(2) = -proc_gap
+            end if
+        end if
+    end do
+
+
+    ! ===== Receive information from the first and the last processes which need a part of my local velocity field =====
+    if (send_gap(1) == 3*mesh_sc%N(direction)) then
+        call mpi_recv(send_gap(1), 1, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
+    end if
+    if (send_gap(2) == 3*mesh_sc%N(direction)) then
+        call mpi_recv(send_gap(2), 1, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
+    end if
+
+
+    call MPI_WAIT(send_request,statut,ierr)
+    call MPI_WAIT(send_request_bis,statut,ierr)
+    do proc_gap = rece_gap(1), rece_gap(2)
+        if (test_request(proc_gap,1)) call MPI_WAIT(s_request(proc_gap,1),statut,ierr)
+        if (test_request(proc_gap,2)) call MPI_WAIT(s_request(proc_gap,2),statut,ierr)
+    end do
+    deallocate(s_request)
+    deallocate(test_request)
+
+end subroutine AC_obtain_receivers_line
+
+
+!> Interpolate the velocity field used in a RK2 scheme for particle advection.
+!!    @param[in]        dt          = time step
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        p_pos_adim  = adimensioned particle positions
+!!    @param[in,out]    p_V         = particle velocities (along the current direction)
+!! @details
+!!    A RK2 scheme is used to advect the particles: the middle-point scheme. An
+!!    intermediate position "p_pos_bis(i) = p_pos(i) + V(i)*dt/(2*dx)" is computed and then
+!!    the numerical velocity of each particle is computed as the interpolation of V at
+!!    this point. This field is used to advect the particles at second order in time:
+!!    p_pos(t+dt, i) = p_pos(i) + dt*p_V(i)/dx.
+!!    The group line index is used to ensure the unicity of each mpi message tag.
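+!! @remark Serial sketch of the scheme (illustrative only; the code below
+!! interleaves these steps with the mpi exchanges needed when the middle
+!! point falls outside the local subdomain). For each particle i:
+!!    p_pos_bis(i) = p_pos_adim(i) + (dt/2)*p_V(i)/dx    ! middle point (mesh units)
+!!    pos          = floor(p_pos_bis(i))                 ! left neighbouring mesh point
+!!    w            = p_pos_bis(i) - pos                  ! linear interpolation weight
+!!    p_V(i)       = w*V(pos+1) + (1-w)*V(pos)           ! interpolated velocity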
+subroutine AC_particle_velocity_line(dt, direction, ind_group, p_pos_adim, p_V)
+
+    ! This code involves a copy of p_V. It would be possible to use the 3D velocity field directly, but such a code
+    ! would still need a memory copy to send the velocity field to other processes: mpi sends contiguous memory values.
+
+    
+    use structure_tools
+    use cart_topology   ! info about mesh and mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    real(WP), intent(in)                            :: dt       ! time step
+    integer, intent(in)                             :: direction
+    integer, dimension(2), intent(in)               :: ind_group
+    real(WP), dimension(:), intent(in)              :: p_pos_adim
+    real(WP), dimension(:), intent(inout)           :: p_V
+    ! Others, local
+    real(WP), dimension(mesh_sc%N_proc(direction))          :: p_pos_bis    ! adimensioned position of the middle point
+    real(WP), dimension(mesh_sc%N_proc(direction)), target  :: p_V_bis      ! velocity at the middle point
+    real(WP), dimension(mesh_sc%N_proc(direction))          :: weight       ! interpolation weight
+    type(real_pter), dimension(mesh_sc%N_proc(direction))   :: Vp, Vm       ! velocity at the next (Vp) and previous (Vm) mesh points
+    real(WP), dimension(:), allocatable, target     :: V_buffer     ! velocity buffer for positions outside of the local subdomain
+    integer                                         :: size_buffer  ! buffer size
+    integer                                         :: rece_ind_min ! minimal index used in the velocity interpolation
+    integer                                         :: rece_ind_max ! maximal index used in the velocity interpolation
+    integer                                         :: ind, ind_com ! indices
+    integer                                         :: pos, pos_old ! index of the mesh point which precedes the particle position
+    integer                                         :: proc_gap, gap! distance between my (mpi) coordinate and the coordinate of the
+                                                                    ! process associated to a given position
+    integer, dimension(:), allocatable              :: rece_rank    ! ranks of the processes which send me information
+    integer                                         :: send_rank    ! rank of the process to which I send information
+    integer                                         :: rankP        ! rank of process ("source rank" returned by mpi_cart_shift)
+    integer, dimension(2)                           :: rece_range   ! range of the velocity field I want to receive
+    integer, dimension(2)                           :: send_range   ! range of the velocity field I send
+    integer, dimension(2)                           :: rece_gap     ! distance between me and the processes which send me information
+    integer, dimension(2)                           :: send_gap     ! distance between me and the processes to which I send information
+    integer                                         :: msg_size     ! size of message send/receive
+    integer                                         :: tag          ! mpi message tag
+    integer                                         :: ierr         ! mpi error code
+    integer, dimension(:), allocatable              :: s_request    ! mpi communication request (handle) of nonblocking send
+    integer, dimension(:), allocatable              :: s_request_bis! mpi communication request (handle) of nonblocking send
+    integer, dimension(:), allocatable              :: rece_request ! mpi communication request (handle) of nonblocking receive
+    integer, dimension(MPI_STATUS_SIZE)             :: rece_status  ! mpi status (for mpi_wait)
+
+    ! -- Initialisation --
+    ind_com = 0
+    do ind = 1, mesh_sc%N_proc(direction)
+        nullify(Vp(ind)%pter)
+        nullify(Vm(ind)%pter)
+    end do
+    ! Compute the middle point
+    p_pos_bis = p_pos_adim + (dt/2.0)*p_V/mesh_sc%dx(direction)
+    p_V_bis = p_V
+    ! Compute range of the set of point where I need the velocity value
+    rece_ind_min = floor(p_pos_bis(1))
+    rece_ind_max = floor(p_pos_bis(mesh_sc%N_proc(direction))) + 1
+    ! Allocate the buffer
+    ! If rece_ind_min and rece_ind_max are not in [1;mesh_sc%N_proc(direction)] then it will change the number of communications
+    ! size_buffer = max(temp - mesh_sc%N_proc(direction), 0) - min(0, temp)
+    !size_buffer = - max(temp - mesh_sc%N_proc(direction), 0) - min(0, temp)
+    ! This should work, but for a first test we prefer to compute size_buffer more simply
+    size_buffer = 0
+
+    ! -- Exchange non-blocking messages so that the computations can overlap
+    ! the communication process --
+    call AC_obtain_receivers_line(direction, ind_group, rece_ind_min, rece_ind_max, send_gap, rece_gap)
+    allocate(rece_rank(rece_gap(1):rece_gap(2)))
+    ! Send messages about what I want
+    allocate(s_request_bis(rece_gap(1):rece_gap(2)))
+    do proc_gap = rece_gap(1), rece_gap(2)
+        call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rece_rank(proc_gap), ierr)
+        if (rece_rank(proc_gap) /= D_rank(direction)) then
+            ! Range I want
+            gap = proc_gap*mesh_sc%N_proc(direction)
+            rece_range(1) = max(rece_ind_min, gap+1) ! fortran => indices start from 1
+            rece_range(2) = min(rece_ind_max, gap+mesh_sc%N_proc(direction))
+            ! Tag = concatenation of (rank+1), ind_group(1), ind_group(2), direction and a unique Id.
+            tag = compute_tag(ind_group, tag_velo_range, direction, proc_gap)
+            ! Send message
+            size_buffer = size_buffer + (rece_range(2)-rece_range(1)) + 1
+            call mpi_ISsend(rece_range(1), 2, MPI_INTEGER, rece_rank(proc_gap), &
+                & tag, D_comm(direction), s_request_bis(proc_gap),ierr)
+        end if
+    end do
+    allocate(V_buffer(max(size_buffer,1)))
+    V_buffer = 0
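+    ! (max(size_buffer,1) above avoids a zero-sized allocation when all the
+    !  velocity values needed are already local)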
+    ! Send the velocity field to the processes which need it
+    allocate(s_request(send_gap(1):send_gap(2)))
+    do proc_gap = send_gap(1), send_gap(2)
+        call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank, ierr)
+        if (send_rank /= D_rank(direction)) then
+            ! I - Receive messages about what I have to send
+            ! Ia - Compute reception tag = concatenation of (rank+1), ind_group(1), ind_group(2), direction and a unique Id.
+            tag = compute_tag(ind_group, tag_velo_range, direction, -proc_gap)
+            ! Ib - Receive the message
+            call mpi_recv(send_range(1), 2, MPI_INTEGER, send_rank, tag, D_comm(direction), rece_status, ierr)
+            send_range = send_range + proc_gap*mesh_sc%N_proc(direction)
+            ! II - Send it
+            ! IIa - Compute send tag
+            tag = compute_tag(ind_group, tag_velo_V, direction, proc_gap)
+            ! IIb - Send message
+            call mpi_Isend(p_V(send_range(1)), send_range(2)-send_range(1)+1, MPI_REAL_WP, &
+                    & send_rank, tag, D_comm(direction), s_request(proc_gap), ierr)
+        end if
+    end do
+
+    ! Non blocking reception of the velocity field
+    ind = 1
+    allocate(rece_request(rece_gap(1):rece_gap(2)))
+    do proc_gap = rece_gap(1), rece_gap(2)
+        if (rece_rank(proc_gap) /= D_rank(direction)) then
+            ! IIa - Compute reception tag
+            tag = compute_tag(ind_group, tag_velo_V, direction, -proc_gap)
+            ! IIb - Receive message
+            gap = proc_gap*mesh_sc%N_proc(direction)
+            rece_range(1) = max(rece_ind_min, gap+1) ! fortran => indices start from 1
+            rece_range(2) = min(rece_ind_max, gap+mesh_sc%N_proc(direction))
+            msg_size = rece_range(2)-rece_range(1)+1
+            call mpi_Irecv(V_buffer(ind), msg_size, MPI_REAL_WP, rece_rank(proc_gap), tag, D_comm(direction), &
+                        & rece_request(proc_gap), ierr)
+            ind = ind + msg_size
+        end if
+    end do
+
+    ! -- Compute the interpolation weights and set the pointers Vp and Vm --
+    ! Initialisation of the recurrence process
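+    ! (the loop below reuses the pointers of particle ind-1 whenever particle
+    !  ind lies in the same cell or in the next one, so that each velocity
+    !  value -- local or in V_buffer -- is located only once)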
+    ind = 1
+    pos = floor(p_pos_bis(ind))
+    weight(ind) = p_pos_bis(ind)-pos
+    ! Vm = V(pos)
+    proc_gap = floor(real(pos-1, WP)/mesh_sc%N_proc(direction))
+    call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank, ierr)
+    if (send_rank == D_rank(direction)) then
+        Vm(ind)%pter => p_V_bis(pos-proc_gap*mesh_sc%N_proc(direction))
+    else
+        ind_com = ind_com + 1
+        Vm(ind)%pter => V_buffer(ind_com)
+    end if
+    ! Vp = V(pos+1)
+    proc_gap = floor(real(pos+1-1, WP)/mesh_sc%N_proc(direction))
+    call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank, ierr)
+    if (send_rank == D_rank(direction)) then
+        Vp(ind)%pter => p_V_bis(pos+1-proc_gap*mesh_sc%N_proc(direction))
+    else
+        ind_com = ind_com + 1
+        Vp(ind)%pter => V_buffer(ind_com)
+    end if
+    pos_old = pos
+
+    ! Following indice : we use previous work (already done)
+    do ind = 2, mesh_sc%N_proc(direction)
+        pos = floor(p_pos_bis(ind))
+        weight(ind) = p_pos_bis(ind)-pos
+        select case(pos-pos_old)
+            case(0)
+                ! The particle belongs to the same segment as the previous one
+                Vm(ind)%pter => Vm(ind-1)%pter
+                Vp(ind)%pter => Vp(ind-1)%pter
+            case(1)
+                ! The particle follows the previous one
+                Vm(ind)%pter => Vp(ind-1)%pter
+                ! Vp = V(pos+1)
+                proc_gap = floor(real(pos+1-1, WP)/mesh_sc%N_proc(direction)) ! fortran -> indice starts from 1
+                call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank, ierr)
+                if (send_rank == D_rank(direction)) then
+                    Vp(ind)%pter => p_V_bis(pos+1-proc_gap*mesh_sc%N_proc(direction))
+                else
+                    ind_com = ind_com + 1
+                    Vp(ind)%pter => V_buffer(ind_com)
+                end if
+            case(2)
+                ! pos = pos_old + 2, which corresponds to an "extension"
+                ! Vm = V(pos)
+                proc_gap = floor(real(pos-1, WP)/mesh_sc%N_proc(direction))
+                call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank, ierr)
+                if (send_rank == D_rank(direction)) then
+                    Vm(ind)%pter => p_V_bis(pos-proc_gap*mesh_sc%N_proc(direction))
+                else
+                    ind_com = ind_com + 1
+                    Vm(ind)%pter => V_buffer(ind_com)
+                end if
+                ! Vp = V(pos+1)
+                proc_gap = floor(real(pos+1-1, WP)/mesh_sc%N_proc(direction))
+                call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank, ierr)
+                if (send_rank == D_rank(direction)) then
+                    Vp(ind)%pter => p_V_bis(pos+1-proc_gap*mesh_sc%N_proc(direction))
+                else
+                    ind_com = ind_com + 1
+                    Vp(ind)%pter => V_buffer(ind_com)
+                end if
+            case default
+                print*, "unexpected case : pos = ", pos, " , pos_old = ", pos_old, " ind = ", ind
+        end select
+        pos_old = pos
+    end do
+
+    ! -- Compute the interpolated velocity --
+    ! Check that the communications are done
+    do proc_gap = rece_gap(1), rece_gap(2)
+        if (rece_rank(proc_gap)/=D_rank(direction)) then
+            call mpi_wait(rece_request(proc_gap), rece_status, ierr)
+        end if
+    end do
+
+
+    ! Then compute the field
+    do ind = 1, mesh_sc%N_proc(direction)
+        p_V(ind) = weight(ind)*Vp(ind)%pter + (1-weight(ind))*Vm(ind)%pter
+    end do
+
+    do ind = 1, mesh_sc%N_proc(direction)
+        nullify(Vp(ind)%pter)
+        nullify(Vm(ind)%pter)
+    end do
+
+    do proc_gap = send_gap(1), send_gap(2)
+        call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank, ierr)
+        if (send_rank /= D_rank(direction)) then
+            call MPI_WAIT(s_request(proc_gap),rece_status,ierr)
+        end if
+    end do
+    deallocate(s_request)
+    do proc_gap = rece_gap(1), rece_gap(2)
+        if (rece_rank(proc_gap) /= D_rank(direction)) then
+            call MPI_WAIT(s_request_bis(proc_gap),rece_status,ierr)
+        end if
+    end do
+    deallocate(s_request_bis)
+
+    ! Deallocation
+    deallocate(rece_rank)
+    deallocate(rece_request)
+    deallocate(V_buffer)
+
+end subroutine AC_particle_velocity_line
+
+
+
+! ===================================================================================================
+! ====================     Other procedures: block type and particle tagging     ====================
+! ===================================================================================================
+!> Determine the type (center or left) of each block of a line and tag the particles of this line where
+!! corrected remeshing formulas are required.
+!!    @param[in]        dt          = time step
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        p_V         = particle velocities (along the current direction)
+!!    @param[out]       bl_type     = table of block types (center or left)
+!!    @param[out]       bl_tag      = information about tagged particles (bl_tag(ind_bl)=1 if the end of the bl_ind-th block
+!!                                    and the beginning of the following one are tagged)
+!! @details
+!!        This subroutine deals with a single line. It determines the type of
+!!    each block of this line and where corrected remeshing formulas are
+!!    required. At those points, it tags the block transition (ie the end of
+!!    the current block and the beginning of the following one) in order to indicate
+!!    that corrected weights have to be used during the remeshing.
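+!! @remark In short (illustrative): with lambda = V*dt/dx minimised over a
+!! block, bl_ind = nint(lambda_min) and the block is of "center" type when
+!! lambda_min < bl_ind, of "left" type otherwise; a transition is tagged when
+!! two adjacent blocks have both different indices and different types.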
+subroutine AC_type_and_block_line(dt, direction, ind_group, p_V, &
+                    & bl_type, bl_tag)
+
+    
+    use cart_topology
+    use advec_variables
+    use precision_tools
+
+    ! In/Out variables
+    real(WP), intent(in)                                    :: dt           ! time step
+    integer, intent(in)                                     :: direction
+    integer, dimension(2), intent(in)                       :: ind_group
+    real(WP), dimension(:), intent(in)                      :: p_V
+    logical, dimension(bl_nb(direction)+1), intent(out)     :: bl_type      ! is the particle block a center block or a left one?
+    logical, dimension(bl_nb(direction)), intent(out)       :: bl_tag       ! true where a block transition is tagged
+    ! Local variables
+    real(WP), dimension(bl_nb(direction)+1)                 :: bl_lambdaMin ! for a particle, lambda = V*dt/dx; bl_lambdaMin = min of
+                                                                            ! lambda over a block (also taking into account the first following particle)
+    real(WP)                                                :: lambP, lambN ! buffers to exchange some lambda min with other processes
+    integer, dimension(bl_nb(direction)+1)                  :: bl_ind       ! block index: integer such that lambda in (bl_ind,bl_ind+1) for a left block
+                                                                            ! and lambda in (bl_ind-1/2, bl_ind+1/2) for a center block
+    integer                                                 :: ind, i_p     ! some indices
+    real(WP)                                                :: cfl          ! = dt/dx
+    integer                                                 :: rankP, rankN ! process ranks for shift (P = previous, N = next)
+    integer, dimension(2)                                   :: send_request ! mpi communication requests (handles) of nonblocking sends
+    integer, dimension(2)                                   :: rece_request ! mpi communication requests (handles) of nonblocking receives
+    integer, dimension(MPI_STATUS_SIZE)                     :: rece_status  ! mpi status (for mpi_wait)
+    integer, dimension(MPI_STATUS_SIZE)                     :: send_status  ! mpi status (for mpi_wait)
+    integer, dimension(2)                                   :: tag_table    ! other tags for mpi message
+    integer                                                 :: ierr         ! mpi error code
+
+    ! ===== Initialisation =====
+    cfl = dt/mesh_sc%dx(direction)
+
+    ! ===== Compute bl_lambdaMin =====
+    ! -- Compute rank of my neighbor --
+    call mpi_cart_shift(D_comm(direction), 0, 1, rankP, rankN, ierr)
+
+    ! -- For the first block (1/2) --
+    ! The domain contains only its second half => exchange ghosts with the previous process
+    bl_lambdaMin(1) = minval(p_V(1:(bl_size/2)+1))*cfl
+    tag_table = compute_tag(ind_group, tag_part_tag_NP, direction)
+    ! Send message
+    call mpi_Isend(bl_lambdaMin(1), 1, MPI_REAL_WP, rankP, tag_table(1), D_comm(direction), send_request(1), ierr)
+    ! Receive it
+    call mpi_Irecv(lambN, 1, MPI_REAL_WP, rankN, tag_table(1), D_comm(direction), rece_request(1), ierr)
+
+    ! -- For the last block (1/2) --
+    ! The process contains only its first half => exchange ghosts with the next process
+    ind = bl_nb(direction) + 1
+    bl_lambdaMin(ind) = minval(p_V(mesh_sc%N_proc(direction)-(bl_size/2)+1:mesh_sc%N_proc(direction)))*cfl
+    ! Send message
+    call mpi_Isend(bl_lambdaMin(ind), 1, MPI_REAL_WP, rankN, tag_table(2), D_comm(direction), send_request(2), ierr)
+    ! Receive it
+    call mpi_Irecv(lambP, 1, MPI_REAL_WP, rankP, tag_table(2), D_comm(direction), rece_request(2), ierr)
+
+    ! -- For the "middle" block --
+    do ind = 2, bl_nb(direction)
+        i_p = ((ind-1)*bl_size) + 1 - bl_size/2
+        bl_lambdaMin(ind) = minval(p_V(i_p:i_p+bl_size))*cfl
+    end do
+
+    ! -- For the first block (1/2) --
+    ! The domain contains only its second half => use the exchanged ghost
+    ! Check reception
+    call mpi_wait(rece_request(2), rece_status, ierr)
+    bl_lambdaMin(1) = min(bl_lambdaMin(1), lambP)
+
+    ! -- For the last block (1/2) --
+    ! The process contains only its first half => use the exchanged ghost
+    ! Check reception
+    call mpi_wait(rece_request(1), rece_status, ierr)
+    ind = bl_nb(direction) + 1
+    bl_lambdaMin(ind) = min(bl_lambdaMin(ind), lambN)
+
+    ! ===== Compute block type and index =====
+    bl_ind = nint(bl_lambdaMin)
+    bl_type = (bl_lambdaMin<dble(bl_ind))   ! => center type if true, else left
+
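+    ! Example: bl_lambdaMin = 1.8 gives bl_ind = nint(1.8) = 2 and 1.8 < 2.0 holds,
+    ! hence a center block; bl_lambdaMin = 2.3 gives bl_ind = 2 but 2.3 < 2.0 fails,
+    ! hence a left block.
+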
+    ! ===== Tag particles =====
+    do ind = 1, bl_nb(direction)
+        bl_tag(ind) = ((bl_ind(ind)/=bl_ind(ind+1)) .and. (bl_type(ind).neqv.bl_type(ind+1)))
+    end do
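+    ! A transition is tagged only when both the block index and the block type
+    ! change between two consecutive blocks (e.g. a center block with bl_ind = 1
+    ! followed by a left block with bl_ind = 2); tagged transitions require the
+    ! corrected remeshing formulas.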
+
+    call mpi_wait(send_request(1), send_status, ierr)
+    call mpi_wait(send_request(2), send_status, ierr)
+
+end subroutine AC_type_and_block_line
+
+
+! ===================================================================
+! ====================     Remesh particles      ====================
+! ===================================================================
+
+!> Determine the set of processes which will send me information during the
+!!  scalar remeshing. Use implicit computation rather than communication (only
+!!  possible if particles are gathered in blocks with constraints on the velocity
+!!  variation - as for the corrected lambda formulas). Works on a single line of particles.
+!!    @param[in]    send_i_min  = minimal index of the send buffer
+!!    @param[in]    send_i_max  = maximal index of the send buffer
+!!    @param[in]    direction   = current direction (1 = along X, 2 = along Y, 3 = along Z)
+!!    @param[in]    ind_group   = coordinate of the current group of lines
+!!    @param[out]   proc_min    = gap between my coordinate and the process of minimal coordinate which will receive information from me
+!!    @param[out]   proc_max    = gap between my coordinate and the process of maximal coordinate which will receive information from me
+!!    @param[out]   rece_proc   = coordinate range of processes which will send me information during the remeshing.
+!! @details
+!!    Obtain the list of processes which contain some particles which belong to
+!!    my subdomain after their advection (and thus which will be remeshed into
+!!    my subdomain). The result is returned as an interval [send_min; send_max].
+!!    All the processes whose coordinate (along the current direction) belongs to
+!!    this segment are involved in the scalar remeshing of the current
+!!    subdomain. This routine does not involve any communication to determine if
+!!    a process is the first or the last one (considering its coordinate along
+!!    the current direction) to send remeshing information to a given process.
+!!    It directly computes this using constraints on the velocity (as in the corrected
+!!    lambda scheme). When possible, use it rather than AC_obtain_senders_com.
+subroutine AC_obtain_senders_line(send_i_min, send_i_max, direction, ind_group, proc_min, proc_max, rece_proc)
+! XXX Works only for periodic conditions. For Dirichlet conditions it is
+! possible not to receive rece_proc(1), rece_proc(2), or either of them
+! => detect it (track the first and the last particles) and deal with it.
+
+    use cart_topology   ! info about mesh and mpi topology
+    
+    use advec_variables
+
+    ! Input/output
+    integer, intent(in)                 :: send_i_min
+    integer, intent(in)                 :: send_i_max
+    integer, intent(in)                 :: direction
+    integer, dimension(2), intent(in)   :: ind_group
+    integer(kind=4), intent(out)        :: proc_min, proc_max
+    integer, dimension(2), intent(out)  :: rece_proc
+    integer, dimension(MPI_STATUS_SIZE) :: statut
+    ! Other local variable
+    integer(kind=4)                     :: proc_gap         ! gap between a processus coordinate (along the current
+                                                            ! direction) into the mpi-topology and my coordinate
+    integer                             :: rankP, rankN     ! processus rank for shift (P= previous, N = next)
+    integer, dimension(2)               :: tag_table        ! mpi message tag (for communicate rece_proc(1) and rece_proc(2))
+    integer, dimension(:,:),allocatable :: send_request     ! mpi status of nonblocking send
+    integer                             :: ierr             ! mpi error code
+
+    tag_table = compute_tag(ind_group, tag_obtsend_NP, direction)
+
+    rece_proc = 3*mesh_sc%N(direction)
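+    ! 3*N(direction) is an impossible gap, used as a sentinel value: it is
+    ! overwritten either locally (if I am my own sender) or by the mpi_recv below.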
+
+    proc_min = floor(real(send_i_min-1, WP)/mesh_sc%N_proc(direction))
+    proc_max = floor(real(send_i_max-1, WP)/mesh_sc%N_proc(direction))
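+    ! Example: with mesh_sc%N_proc(direction) = 16, send_i_min = -3 gives
+    ! proc_min = floor(-4./16.) = -1 (one subdomain to the left) and
+    ! send_i_max = 20 gives proc_max = floor(19./16.) = 1 (one to the right).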
+
+    allocate(send_request(proc_min:proc_max,3))
+    send_request(:,3) = 0
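+    ! Column 3 records which nonblocking sends were posted for each gap:
+    ! 0 = none, 1 = "first" message only, 2 = "last" message only, 3 = both.
+    ! It is used below to wait on the matching requests before deallocation.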
+
+    ! Send
+    do proc_gap = proc_min, proc_max
+        ! Compute the rank of the target processus
+        call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rankN, ierr)
+        ! Determine if I am the first or the last process (considering my
+        ! coordinate along the current direction) to send information to
+        ! one of these processes.
+        ! Note that local indices go from 1 to mesh_sc%N_proc (Fortran convention).
+        ! I am the first ?
+        if ((send_i_min< +1-2*bl_bound_size + proc_gap*mesh_sc%N_proc(direction)+1).AND. &
+                    & (send_i_max>= proc_gap*mesh_sc%N_proc(direction))) then
+            if(rankN /= D_rank(direction)) then
+                call mpi_Isend(-proc_gap, 1, MPI_INTEGER, rankN, tag_table(1), D_comm(direction), &
+                        & send_request(proc_gap,1), ierr)
+                send_request(proc_gap,3) = 1
+            else
+                rece_proc(1) = -proc_gap
+            end if
+        end if
+        ! I am the last ?
+        if ((send_i_max > -1+2*bl_bound_size + (proc_gap+1)*mesh_sc%N_proc(direction)) &
+                    & .AND.(send_i_min<= (proc_gap+1)*mesh_sc%N_proc(direction))) then
+            if(rankN /= D_rank(direction)) then
+                call mpi_Isend(-proc_gap, 1, MPI_INTEGER, rankN, tag_table(2), D_comm(direction), &
+                        & send_request(proc_gap,2), ierr)
+                send_request(proc_gap,3) = send_request(proc_gap, 3) + 2
+            else
+                rece_proc(2) = -proc_gap
+            end if
+        end if
+    end do
+
+
+    ! Receive
+    if (rece_proc(1) == 3*mesh_sc%N(direction)) then
+        call mpi_recv(rece_proc(1), 1, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
+    end if
+    if (rece_proc(2) == 3*mesh_sc%N(direction)) then
+        call mpi_recv(rece_proc(2), 1, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
+    end if
+
+    ! Free Isend buffer
+    do proc_gap = proc_min, proc_max
+        select case (send_request(proc_gap,3))
+            case (3)
+                call mpi_wait(send_request(proc_gap,1), statut, ierr)
+                call mpi_wait(send_request(proc_gap,2), statut, ierr)
+            case (2)
+                call mpi_wait(send_request(proc_gap,2), statut, ierr)
+            case (1)
+                call mpi_wait(send_request(proc_gap,1), statut, ierr)
+        end select
+    end do
+
+    deallocate(send_request)
+
+end subroutine AC_obtain_senders_line
+
+
+!> Common procedure for remeshing which performs all the communication and provides
+!! the updated scalar field.
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        send_i_min  = minimal index of the send buffer
+!!    @param[in]        send_i_max  = maximal index of the send buffer
+!!    @param[out]       proc_min    = gap between my coordinate and the process of minimal coordinate which will receive information from me
+!!    @param[out]       proc_max    = gap between my coordinate and the process of maximal coordinate which will receive information from me
+!!    @param[out]       rece_proc   = coordinate range of processes which will send me information during the remeshing.
+!!    @param[in]        send_buffer = buffer used to remesh the scalar before sending it to the right subdomain
+!!    @param[in,out]    scal1D      = one-dimensional scalar field to advect
+!! @details
+!!    Remeshing is done in a local buffer. This subroutine distributes this buffer
+!!    to the right processes, receives the buffers sent and updates the scalar field.
+subroutine AC_bufferToScalar_line(direction, ind_group, send_i_min, send_i_max, proc_min, proc_max, rece_proc,send_buffer, scal1D)
+
+    use cart_topology   ! info about mesh and mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+    
+
+    ! Input/Output
+    integer, intent(in)                                     :: direction
+    integer, dimension(2), intent(in)                       :: ind_group
+    integer, intent(in)                                     :: send_i_min
+    integer, intent(in)                                     :: send_i_max
+    integer, dimension(2), intent(in)                       :: rece_proc    ! minimal and maximal gap between my coordinate and the
+                                                                            ! ones from which I will receive data
+    integer, intent(in)                                     :: proc_min     ! smallest gap between me and the processes to which I send data
+    integer, intent(in)                                     :: proc_max     ! largest gap between me and the processes to which I send data
+    real(WP), dimension(send_i_min:send_i_max), intent(in)  :: send_buffer
+    real(WP), dimension(mesh_sc%N_proc(direction)), intent(inout)   :: scal1D
+
+    ! Variables used to communicate between subdomains. A variable prefixed by "send_" (resp. "rece_")
+    ! designates something I send (resp. I receive).
+    integer                             :: i            ! array index
+    integer                             :: proc_gap     ! gap between my coordinate and the one of another process
+    real(WP), dimension(:), allocatable :: rece_buffer  ! buffer used to store the received scalar field
+    integer                             :: send_gap     ! number of mesh points between me and another process
+    integer,dimension(:,:), allocatable :: rece_range   ! range of (local) indices where the received scalar field has to be saved
+    integer,dimension(:,:), allocatable :: send_range   ! range of (local) indices of the scalar field to send
+    integer, dimension(:), allocatable  :: rece_request ! mpi communication requests (handles) of nonblocking receives
+    integer, dimension(:), allocatable  :: rece_rank    ! ranks of the processes from which I receive data
+    integer                             :: send_rank    ! rank of the process to which I send data
+    integer                             :: rankP        ! rank used in mpi_cart_shift
+    integer, dimension(MPI_STATUS_SIZE) :: rece_status  ! mpi status (for mpi_wait)
+    integer, dimension(MPI_STATUS_SIZE) :: send_status  ! mpi status (for mpi_wait)
+    integer, dimension(:,:),allocatable :: send_request ! mpi request handles of nonblocking sends
+    integer                             :: rece_i_min   ! minimal index of the received scalar field
+    integer                             :: rece_i_max   ! maximal index of the received scalar field
+    integer                             :: ierr         ! mpi error code
+    integer                             :: comm_size    ! number of elements to send/receive
+    integer                             :: tag          ! mpi message tag
+
+    ! ===== Receive information =====
+    ! -- Allocate field --
+    allocate(rece_rank(rece_proc(1):rece_proc(2)))
+    allocate(rece_range(2,rece_proc(1):rece_proc(2)))  ! be careful: mpi uses contiguous memory elements
+    allocate(rece_request(rece_proc(1):rece_proc(2)))
+    ! -- Receive range --
+    do proc_gap = rece_proc(1), rece_proc(2)
+        call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rece_rank(proc_gap), ierr)
+        if (rece_rank(proc_gap)/=D_rank(direction)) then
+            tag = compute_tag(ind_group, tag_bufToScal_range, direction, -proc_gap)
+            call mpi_Irecv(rece_range(1,proc_gap), 2, MPI_INTEGER, rece_rank(proc_gap), tag, D_comm(direction), &
+                        & rece_request(proc_gap), ierr) ! we use tag = source rank
+        end if
+    end do
+
+    ! Send the information
+    allocate(send_request(proc_min:proc_max,3))
+    send_request(:,3)=0
+    allocate(send_range(2,proc_min:proc_max))
+    do proc_gap = proc_min, proc_max
+            ! Compute the rank of the target processus
+            call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank, ierr)
+            send_gap = proc_gap*mesh_sc%N_proc(direction)
+            send_range(1, proc_gap) = max(send_i_min, send_gap+1) ! Fortran: local indices start from 1
+            send_range(2, proc_gap) = min(send_i_max, send_gap+mesh_sc%N_proc(direction))
+        if (send_rank/=D_rank(direction)) then
+            ! Determine quantity of information to send
+            comm_size = send_range(2, proc_gap)-send_range(1, proc_gap)+1
+            ! Send the range of the scalar field send
+            tag = compute_tag(ind_group, tag_bufToScal_range, direction, proc_gap)
+            call mpi_ISsend(send_range(1, proc_gap), 2, MPI_INTEGER, send_rank, tag, D_comm(direction), send_request(proc_gap,1)&
+                    & , ierr)
+            ! And send the buffer
+            tag = compute_tag(ind_group, tag_bufToScal_buffer, direction, proc_gap)
+            call mpi_ISsend(send_buffer(send_range(1,proc_gap)),comm_size, MPI_REAL_WP, send_rank, &
+                        & tag, D_comm(direction), send_request(proc_gap,2), ierr)
+            send_request(proc_gap,3) = 1
+        else
+            ! I have to distribute the buffer into my own scalar field
+            do i = send_range(1, proc_gap), send_range(2, proc_gap)
+                scal1D(i-send_gap) = scal1D(i-send_gap) + send_buffer(i)
+            end do
+        end if
+    end do
+
+    ! Check reception
+    do proc_gap = rece_proc(1), rece_proc(2)
+        if (rece_rank(proc_gap)/=D_rank(direction)) then
+            call mpi_wait(rece_request(proc_gap), rece_status, ierr)
+        end if
+    end do
+    deallocate(rece_request)
+    ! Receive buffers and remesh them
+        ! XXX Possible optimisation: an optimal code would
+        !   1 - post non-blocking receptions of the scalar buffers
+        !   2 - check when a reception is done and then update the scalar
+        !   3 - iterate step 2 until all messages have been received and the
+        !       scalar field has been updated with all the scalar buffers
+    do proc_gap = rece_proc(1), rece_proc(2)
+        if (rece_rank(proc_gap)/=D_rank(direction)) then
+            rece_i_min = rece_range(1,proc_gap)
+            rece_i_max = rece_range(2,proc_gap)
+            ! Receive information
+            comm_size=(rece_i_max-rece_i_min+1)
+            allocate(rece_buffer(rece_i_min:rece_i_max)) ! XXX possible optimisation:
+                ! allocate once to the maximal size; note that the range used in
+                ! this allocation instruction is included in (1, mesh_sc%N_proc(2))
+            tag = compute_tag(ind_group, tag_bufToScal_buffer, direction, -proc_gap)
+            call mpi_recv(rece_buffer(rece_i_min), comm_size, MPI_REAL_WP, &
+                    & rece_rank(proc_gap), tag, D_comm(direction), rece_status, ierr)
+            ! Update the scalar field
+            send_gap = proc_gap*mesh_sc%N_proc(direction)
+            scal1D(rece_i_min+send_gap:rece_i_max+send_gap) = scal1D(rece_i_min+send_gap:rece_i_max+send_gap) &
+                & + rece_buffer(rece_i_min:rece_i_max)
+            deallocate(rece_buffer)
+        end if
+    end do
+
+
+    ! Free Isend buffer
+    do proc_gap = proc_min, proc_max
+        if (send_request(proc_gap,3)/=0) then
+            call mpi_wait(send_request(proc_gap,1), send_status, ierr)
+            call mpi_wait(send_request(proc_gap,2), send_status, ierr)
+        end if
+    end do
+    deallocate(send_request)
+    deallocate(send_range)
+
+    deallocate(rece_range)
+    deallocate(rece_rank)
+
+end subroutine AC_bufferToScalar_line
+
+
+
+end module advec_common_line
diff --git a/HySoP/src/scalesReduced/particles/advec_line/advec_remesh_formula_line.f90 b/HySoP/src/scalesReduced/particles/advec_line/advec_remesh_formula_line.f90
new file mode 100644
index 0000000000000000000000000000000000000000..8bd63c6461f4af7a142167ddf8096037129986d4
--- /dev/null
+++ b/HySoP/src/scalesReduced/particles/advec_line/advec_remesh_formula_line.f90
@@ -0,0 +1,747 @@
+!USEFORTEST advec
+!> @addtogroup part
+!! @{
+!------------------------------------------------------------------------------
+!
+! MODULE: advec_remeshing_line
+!
+!
+! DESCRIPTION:
+!> This module gathers all the remeshing formulas. These interpolation
+!! polynomials allow particles to be redistributed on the mesh grid at each
+!! iteration. - old version, for advection without groups of lines
+!! @details
+!! It provides the lambda 2 corrected, lambda 4 corrected and M'6 remeshing formulas.
+!! The remeshing formulas of "corrected lambda" type are designed for large time
+!! steps. The M'6 formula appears to be stable for large time steps, but
+!! its numerical analysis remains to be done.
+!!     This module also provides some wrappers to remesh a complete line
+!! of particles (with the different formulas), either on an array or on an
+!! array of pointers to reals. In order to gather communications between
+!! different lines of particles, it is better to use contiguous memory for
+!! mesh points which belong to the same process, and thus to use an array of
+!! pointers to easily deal with it.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advec_remeshing_line
+
+    public AC_remesh_lambda4corrected_basic
+    public AC_remesh_lambda2corrected_basic
+
+
+
+contains
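+
+! Typical use of these wrappers (sketch; see e.g. Xremesh_O2 in advec_remesh_line):
+! the caller first types and tags the particle blocks, allocates a send buffer
+! covering [send_j_min, send_j_max] and zeroes it, then calls, for instance,
+!   call AC_remesh_lambda2corrected_basic(direction, p_pos_adim, scal1D, &
+!       & bl_type, bl_tag, send_j_min, send_j_max, send_buffer)
+! and finally redistributes the buffer with AC_bufferToScalar_line.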
+
+!> Remesh a particle line with the corrected lambda 2 formula - remeshing is done into
+!! a real array
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        p_pos_adim  = adimensionned  particles position
+!!    @param[in]        scal1D      = scalar field to advect
+!!    @param[in]        bl_type     = equal to .false. (resp. .true.) if the block is left (resp. centered)
+!!    @param[in]        bl_tag      = contains information about the blocks (are they tagged?)
+!!    @param[in]        ind_min     = minimal index of the send buffer
+!!    @param[in]        ind_max     = maximal index of the send buffer
+!!    @param[in, out]   send_buffer = buffer used to remesh the scalar before sending it to the right subdomain
+!! @details
+!!     Use the corrected lambda 2 remeshing formula.
+!! This remeshing formula depends on the particle type:
+!!     1 - Is the particle tagged?
+!!     2 - Does it belong to a centered or a left block?
+!! Observe that tagged particles go by groups of two: if the particles at a
+!! block end are tagged, the first ones of the following block are
+!! tagged too.
+!! The following algorithm is written for blocks of minimal size.
+!! @author = Jean-Baptiste Lagaert, LEGI/Ljk
+subroutine AC_remesh_lambda2corrected_basic(direction, p_pos_adim, scal1D, bl_type, bl_tag, ind_min, ind_max, send_buffer)
+
+    use cart_topology   ! Description of mesh and of mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    integer, intent(in)                                 :: direction
+    real(WP), dimension(:), intent(in)                  :: p_pos_adim
+    real(WP), dimension(mesh_sc%N_proc(direction)), intent(in)  :: scal1D
+    logical, dimension(:), intent(in)                   :: bl_type
+    logical, dimension(:), intent(in)                   :: bl_tag
+    integer, intent(in)                                 :: ind_min, ind_max
+    real(WP), dimension(ind_min:ind_max), intent(inout) :: send_buffer
+    ! Other local variables
+    integer     :: bl_ind                               ! index of the current "block end"
+    integer     :: p_ind                                ! index of the current particle
+
+    send_j_min = ind_min
+    send_j_max = ind_max
+
+    do p_ind = 1, mesh_sc%N_proc(direction), bl_size
+        bl_ind = p_ind/bl_size + 1
+        if (bl_tag(bl_ind)) then
+            ! Tag case
+                ! XXX Debug : to activate only for debugging purposes
+                !if (bl_type(ind).neqv. (.not. bl_type(ind+1))) then
+                !    write(*,'(a,x,3(L1,x),a,3(i0,a))'), 'error on remeshing particles: (tag,type(i), type(i+1)) =', &
+                !    & bl_tag(ind), bl_type(ind), bl_type(ind+1), ' and types must be different. Mesh point = (',i, ', ', j,', ',k,')'
+                !    write(*,'(a,x,i0)'),  'block parameters: ind =', bl_ind
+                !    stop
+                !end if
+                ! XXX Debug - end
+            if (bl_type(bl_ind)) then
+                ! tagged: the first particle belongs to a centered block and the last to a left block.
+                call AC_remesh_tag_CL(p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), send_buffer)
+            else
+                ! tagged: the first particle belongs to a left block and the last to a centered block.
+                call AC_remesh_tag_LC(p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), send_buffer)
+            end if
+        else
+            if (bl_type(bl_ind)) then
+                ! First particle is remeshed with center formula
+                call AC_remesh_center(p_pos_adim(p_ind),scal1D(p_ind), send_buffer)
+            else
+                ! First particle is remeshed with left formula
+                call AC_remesh_left(p_pos_adim(p_ind),scal1D(p_ind), send_buffer)
+            end if
+            if (bl_type(bl_ind+1)) then
+                ! Second particle is remeshed with center formula
+                call AC_remesh_center(p_pos_adim(p_ind+1),scal1D(p_ind+1), send_buffer)
+            else
+                ! Second particle is remeshed with left formula
+                call AC_remesh_left(p_pos_adim(p_ind+1),scal1D(p_ind+1), send_buffer)
+            end if
+        end if
+    end do
+
+end subroutine AC_remesh_lambda2corrected_basic
+
+
+!> Remesh a particle line with the corrected lambda 4 formula - array version
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        p_pos_adim  = adimensionned  particles position
+!!    @param[in]        scal1D      = scalar field to advect
+!!    @param[in]        bl_type     = equal to .false. (resp. .true.) if the block is left (resp. centered)
+!!    @param[in]        bl_tag      = contains information about the blocks (are they tagged?)
+!!    @param[in]        ind_min     = minimal index of the send buffer
+!!    @param[in]        ind_max     = maximal index of the send buffer
+!!    @param[in, out]   send_buffer = buffer used to remesh the scalar before sending it to the right subdomain
+!! @details
+!!     Use the corrected lambda 4 remeshing formula.
+!! This remeshing formula depends on the particle type:
+!!     1 - Is the particle tagged?
+!!     2 - Does it belong to a centered or a left block?
+!! Observe that tagged particles go by groups: if the particles at a
+!! block end are tagged, the first ones of the following block are
+!! tagged too.
+!! The following algorithm is written for blocks of minimal size.
+!! @author = Jean-Baptiste Lagaert, LEGI/Ljk
+subroutine AC_remesh_lambda4corrected_basic(direction, p_pos_adim, scal1D, bl_type, bl_tag, ind_min, ind_max, send_buffer)
+
+    use cart_topology   ! Description of mesh and of mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    integer, intent(in)                                 :: direction
+    real(WP), dimension(:), intent(in)                  :: p_pos_adim
+    real(WP), dimension(mesh_sc%N_proc(direction)), intent(in)  :: scal1D
+    logical, dimension(:), intent(in)                   :: bl_type
+    logical, dimension(:), intent(in)                   :: bl_tag
+    integer, intent(in)                                 :: ind_min, ind_max
+    real(WP), dimension(ind_min:ind_max), intent(inout) :: send_buffer
+    ! Other local variables
+    integer     :: bl_ind                               ! index of the current "block end"
+    integer     :: p_ind                                ! index of the current particle
+
+    send_j_min = ind_min
+    send_j_max = ind_max
+
+    do p_ind = 1, mesh_sc%N_proc(direction), bl_size
+        bl_ind = p_ind/bl_size + 1
+        if (bl_tag(bl_ind)) then
+            ! Tagged case
+            if (bl_type(bl_ind)) then
+                ! tagged: the first particle belongs to a centered block and the last to a left block.
+                call AC_remesh_O4_tag_CL(p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), &
+                        & p_pos_adim(p_ind+2), scal1D(p_ind+2), p_pos_adim(p_ind+3), scal1D(p_ind+3), send_buffer)
+            else
+                ! tagged: the first particle belongs to a left block and the last to a centered block.
+                call AC_remesh_O4_tag_LC(p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), &
+                        & p_pos_adim(p_ind+2), scal1D(p_ind+2), p_pos_adim(p_ind+3), scal1D(p_ind+3), send_buffer)
+            end if
+        else
+            ! No tag
+            if (bl_type(bl_ind)) then
+                call AC_remesh_O4_center(p_pos_adim(p_ind),scal1D(p_ind), send_buffer)
+                call AC_remesh_O4_center(p_pos_adim(p_ind+1),scal1D(p_ind+1), send_buffer)
+            else
+                call AC_remesh_O4_left(p_pos_adim(p_ind),scal1D(p_ind), send_buffer)
+                call AC_remesh_O4_left(p_pos_adim(p_ind+1),scal1D(p_ind+1), send_buffer)
+            end if
+            if (bl_type(bl_ind+1)) then
+                call AC_remesh_O4_center(p_pos_adim(p_ind+2),scal1D(p_ind+2), send_buffer)
+                call AC_remesh_O4_center(p_pos_adim(p_ind+3),scal1D(p_ind+3), send_buffer)
+            else
+                call AC_remesh_O4_left(p_pos_adim(p_ind+2),scal1D(p_ind+2), send_buffer)
+                call AC_remesh_O4_left(p_pos_adim(p_ind+3),scal1D(p_ind+3), send_buffer)
+            end if
+        end if
+    end do
+
+end subroutine AC_remesh_lambda4corrected_basic
+
+
+!> Left remeshing formula of order 2
+!!      @param[in]       pos_adim= adimensionned particle position
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporarily remeshed scalar field
+subroutine AC_remesh_left(pos_adim, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    !Input/Output
+    real(WP), intent(in)                                        :: pos_adim, sca
+    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: j0                       ! index of the nearest mesh point
+    real(WP)    :: bM, b0, bP               ! interpolation weights for the particles
+    real(WP)    :: y0                       ! adimensionned distance to mesh points
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+    !j0 = floor(pos/d_sc(2))
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! Interpolation weights
+    bM=0.5*y0*(y0-1.)
+    b0=1.-y0**2
+    !bP=0.5*y0*(y0+1.)
+    bP=1. - (b0+bM)
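+    ! Computing bP as 1-(b0+bM) rather than from its polynomial form makes the
+    ! three weights sum to one exactly in floating point, so the remeshing
+    ! conserves the advected quantity.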
+
+    ! remeshing
+    buffer(j0-1) = buffer(j0-1)   + bM*sca
+    buffer(j0)   = buffer(j0)     + b0*sca
+    buffer(j0+1) = buffer(j0+1)   + bP*sca
+
+end subroutine AC_remesh_left
+
+
+!> Centered remeshing formula of order 2
+!!      @param[in]       pos_adim= adimensionned particle position
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporarily remeshed scalar field
+subroutine AC_remesh_center(pos_adim, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/output
+    real(WP), intent(in)                                        :: pos_adim, sca
+    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: j0                       ! index of the nearest mesh point
+    real(WP)    :: bM, b0, bP               ! interpolation weights for the particles
+    real(WP)    :: y0                       ! adimensionned distance to mesh points
+
+    j0 = nint(pos_adim)
+    !j0 = nint(pos/d_sc(2))
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+    !y0 = (pos - dble(j0)*d_sc(2))/d_sc(2)
+
+    ! Interpolation weights
+    bM=0.5*y0*(y0-1.)
+    b0=1.-y0**2
+    !bP=0.5*y0*(y0+1.)
+    bP=1. -b0 - bM
+
+    ! remeshing
+    buffer(j0-1) = buffer(j0-1)   + bM*sca
+    buffer(j0)   = buffer(j0)     + b0*sca
+    buffer(j0+1) = buffer(j0+1)   + bP*sca
+
+end subroutine AC_remesh_center
+
+
+!> Corrected remeshing formula for the transition from a centered block to a left block with a different index (tagged particles)
+!!    @param[in]       pos_adim= adimensionned particle position
+!!    @param[in]       sca     = scalar advected by this particle
+!!    @param[in]       posP_ad = adimensionned position of the second particle
+!!    @param[in]       scaP    = scalar advected by this particle
+!!    @param[in,out]   buffer  = temporarily remeshed scalar field
+!! @details
+!!    Remeshing formula devoted to tagged particles.
+!!    The particle group sent as argument is composed of a block end and of the
+!!    beginning of the next block. The first particle belongs to a centered block
+!!    and the last to a left one. The blocks have different indices (tagged
+!!    particles) and we have to use the corrected formula.
+subroutine AC_remesh_tag_CL(pos_adim, sca, posP_ad, scaP, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    real(WP), intent(in)                                        :: pos_adim, sca, posP_ad, scaP
+    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: jM, j0, jP               ! indices of the nearest mesh points
+                                            ! (they depend on the block type)
+    integer     :: j0_bis                   ! index of the nearest mesh point for the indP=ind+1 particle
+    real(WP)    :: aM, a0, bP, b0           ! interpolation weights for the particles
+    real(WP)    :: y0, y0_bis               ! adimensionned distances to mesh points
+
+    j0 = nint(pos_adim)
+    !j0 = nint(pos/d_sc(2))
+    j0_bis = floor(posP_ad)
+    !j0_bis = floor(posP/d_sc(2))
+    jM=j0-1
+    jP=j0+1
+
+    y0 = (pos_adim - real(j0, WP))
+    !y0 = (pos - dble(j0)*d_sc(2))/d_sc(2)
+    y0_bis = (posP_ad - real(j0_bis, WP))
+    !y0_bis = (posP - dble(j0_bis)*d_sc(2))/d_sc(2)
+
+    aM=0.5*y0*(y0-1)
+    a0=1.-aM
+    bP=0.5*y0_bis*(y0_bis+1.)
+    b0=1.-bP
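+    ! The weights of each particle form a partition of unity (aM+a0 = 1 and
+    ! bP+b0 = 1), so the quantity carried by both particles is conserved.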
+
+    ! Remeshing
+    buffer(jM)=buffer(jM)+aM*sca
+    buffer(j0)=buffer(j0)+a0*sca+b0*scaP
+    buffer(jP)=buffer(jP)+bP*scaP
+
+end subroutine AC_remesh_tag_CL
+
+
+!> Corrected remeshing formula for the transition from a left block to a centered block with a different index (tagged particles)
+!!    @param[in]       pos_adim= adimensionned particle position
+!!    @param[in]       sca     = scalar advected by this particle
+!!    @param[in]       posP_ad = adimensionned position of the second particle
+!!    @param[in]       scaP    = scalar advected by this particle
+!!    @param[in,out]   buffer  = temporarily remeshed scalar field
+!! @details
+!!    Remeshing formula devoted to tagged particles.
+!!    The particle group sent as argument is composed of a block end and of the
+!!    beginning of the next block. The first particle belongs to a left block
+!!    and the last to a centered one. The blocks have different indices (tagged
+!!    particles) and we have to use the corrected formula.
+subroutine AC_remesh_tag_LC(pos_adim, sca, posP_ad, scaP, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    real(WP), intent(in)                                        :: pos_adim, sca, posP_ad, scaP
+    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: jM, j0, jP, jP2, jP3             ! indices of the nearest mesh points
+                                                    ! (they depend on the block type)
+    integer     :: j0_bis                           ! index of the nearest mesh point for the indP=ind+1 particle
+    real(WP)    :: aM, a0, aP,aP2, b0, bP, bP2, bP3 ! interpolation weights for the particles
+    real(WP)    :: y0, y0_bis                       ! adimensionned distances to mesh points
+
+
+    ! Indice of mesh point used in order to remesh
+    j0 = floor(pos_adim)
+    !j0 = floor(pos/d_sc(2))
+    j0_bis = nint(posP_ad)
+    !j0_bis = nint(posP/d_sc(2))
+    jM=j0-1
+    jP=j0+1
+    jP2=j0+2
+    jP3=j0+3
+
+    ! Distance to mesh point
+    y0 = (pos_adim - real(j0, WP))
+    !y0 = (pos - dble(j0)*d_sc(2))/d_sc(2)
+    y0_bis = (posP_ad - real(j0_bis, WP))
+    !y0_bis = (posP - dble(j0_bis)*d_sc(2))/d_sc(2)
+
+    ! Interpolation weight
+    a0=1-y0**2
+    aP=y0
+    !aM=y0*yM/2.
+    aM = 0.5-(a0+aP)/2.
+    aP2=aM
+    bP=-y0_bis
+    bP2=1-y0_bis**2
+    !b0=y0_bis*yP_bis/2.
+    b0 = 0.5-(bP+bP2)/2.
+    bP3=b0
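+    ! Here too each particle's weights sum to one: aM+a0+aP+aP2 = 1 (since
+    ! aP2 = aM) and b0+bP+bP2+bP3 = 1 (since bP3 = b0), which preserves the
+    ! conservativity of the scheme.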
+
+    ! Remeshing
+    buffer(jM)= buffer(jM)+aM*sca
+    buffer(j0)= buffer(j0)+a0*sca+b0*scaP
+    buffer(jP)= buffer(jP)+aP*sca+bP*scaP
+    buffer(jP2)=buffer(jP2)+aP2*sca+bP2*scaP
+    buffer(jP3)=buffer(jP3)+bP3*scaP
+
+end subroutine AC_remesh_tag_LC
+
+
+!> Left remeshing formula of order 4
+!!      @param[in]       pos_adim= adimensionned particle position
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporarily remeshed scalar field
+subroutine AC_remesh_O4_left(pos_adim, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    !Input/Output
+    real(WP), intent(in)                                        :: pos_adim, sca
+    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: j0                       ! index of the nearest mesh point
+    real(WP)    :: bM2, bM, b0, bP, bP2     ! interpolation weights for the particles
+    real(WP)    :: y0                       ! adimensionned distance to mesh points
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+    !j0 = floor(pos/d_sc(2))
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! Interpolation weights
+    !bM2=(y0-2.)*(y0-1.)*y0*(y0+1.)/24.0
+    bM2=y0*(2.+y0*(-1.+y0*(-2.+y0)))/24.0
+    !bM =(2.-y0)*(y0-1.)*y0*(y0+2.)/6.0
+    bM =y0*(-4.+y0*(4.+y0*(1.-y0)))/6.0
+    !bP =(2.-y0)*y0*(y0+1.)*(y0+2.)/6.0
+    bP =y0*(4+y0*(4-y0*(1.+y0)))/6.0
+    !bP2=(y0-1.)*y0*(y0+1.)*(y0+2.)/24.0
+    bP2=y0*(-2.+y0*(-1.+y0*(2.+y0)))/24.0
+    !b0 =(y0-2.)*(y0-1.)*(y0+1.)*(y0+2.)/4.0
+    b0 = 1. -(bM2+bM+bP+bP2)
+
+    ! remeshing
+    buffer(j0-2) = buffer(j0-2)   + bM2*sca
+    buffer(j0-1) = buffer(j0-1)   + bM*sca
+    buffer(j0)   = buffer(j0)     + b0*sca
+    buffer(j0+1) = buffer(j0+1)   + bP*sca
+    buffer(j0+2) = buffer(j0+2)   + bP2*sca
+
+end subroutine AC_remesh_O4_left
+
+
+!> Centered remeshing formula of order 4 - array version
+!!      @param[in]       pos_adim= adimensionned particle position
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporarily remeshed scalar field
+subroutine AC_remesh_O4_center(pos_adim, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/output
+    real(WP), intent(in)                                        :: pos_adim, sca
+    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: j0                       ! index of the nearest mesh point
+    real(WP)    :: bM2, bM, b0, bP, bP2     ! interpolation weights for the particles
+    real(WP)    :: y0                       ! adimensionned distance to mesh points
+    ! Mesh point used in remeshing formula
+    j0 = nint(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! Interpolation weights
+    !bM2=(y0-2.)*(y0-1.)*y0*(y0+1.)/24.0
+    bM2=y0*(2._WP+y0*(-1.+y0*(-2._WP+y0)))/24._WP
+    !bM =(2.-y0)*(y0-1.)*y0*(y0+2.)/6.0
+    bM =y0*(-4._WP+y0*(4._WP+y0*(1._WP-y0)))/6._WP
+    !bP =(2.-y0)*y0*(y0+1.)*(y0+2.)/6.0
+    bP =y0*(4._WP+y0*(4._WP-y0*(1._WP+y0)))/6._WP
+    !bP2=(y0-1.)*y0*(y0+1.)*(y0+2.)/24.0
+    bP2=y0*(-2._WP+y0*(-1._WP+y0*(2._WP+y0)))/24._WP
+    !b0 =(y0-2.)*(y0-1.)*(y0+1.)*(y0+2.)/4.0
+    b0 = 1._WP -(bM2+bM+bP+bP2)
+
+    ! remeshing
+    buffer(j0-2) = buffer(j0-2)   + bM2*sca
+    buffer(j0-1) = buffer(j0-1)   + bM*sca
+    buffer(j0)   = buffer(j0)     + b0*sca
+    buffer(j0+1) = buffer(j0+1)   + bP*sca
+    buffer(j0+2) = buffer(j0+2)   + bP2*sca
+
+end subroutine AC_remesh_O4_center
+
+
+!> Order 4 corrected remeshing formula for the transition from a centered block to a left block with a different index (tagged particles)
+!! - version for an array of reals.
+!!    @param[in]       posM_ad = adimensionned position of the first particle
+!!    @param[in]       scaM    = scalar advected by the first particle
+!!    @param[in]       pos_adim= adimensionned particle position
+!!    @param[in]       sca     = scalar advected by this particle
+!!    @param[in]       posP_ad = adimensionned position of the second particle
+!!    @param[in]       scaP    = scalar advected by this particle
+!!    @param[in]       posP2_ad= adimensionned position of the fourth (and last) particle
+!!    @param[in]       scaP2   = scalar advected by this particle
+!!    @param[in,out]   buffer  = temporarily remeshed scalar field
+!! @details
+!!    Remeshing formula devoted to tagged particles.
+!!    The particle group sent as argument is composed of a block end and of the
+!!    beginning of the next block. The first particles belong to a centered block
+!!    and the last ones to a left one. The blocks have different indices (tagged
+!!    particles) and we have to use the corrected formula.
+subroutine AC_remesh_O4_tag_CL(posM_ad, scaM, pos_adim, sca, posP_ad, scaP, posP2_ad, scaP2, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    real(WP), intent(in)                                        :: pos_adim, sca, posP_ad, scaP
+    real(WP), intent(in)                                        :: posM_ad, scaM, posP2_ad, scaP2
+    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: jM, j0, jP, jP2          ! indices of the nearest mesh points
+                                            ! (they depend on the block type)
+    real(WP)    :: aM3, aM2, aM, a0         ! interpolation weights for the particles
+    real(WP)    :: bM2, bM, b0, bP          ! interpolation weights for the particles
+    real(WP)    :: cM, c0, cP, cP2          ! interpolation weights for the particles
+    real(WP)    :: e0, eP, eP2, eP3         ! interpolation weights for the particles
+    real(WP)    :: yM, y0, yP, yP2          ! adimensionned distances to mesh points for each particle
+
+    ! Indice of mesh point used in order to remesh
+    jM = nint(posM_ad)
+    j0 = nint(pos_adim)
+    jP = floor(posP_ad)
+    jP2= floor(posP2_ad)
+
+    ! Distance to mesh point
+    yM = (posM_ad  - real(jM, WP))
+    y0 = (pos_adim - real(j0, WP))
+    yP = (posP_ad  - real(jP, WP))
+    yP2= (posP2_ad - real(jP2, WP))
+
+    ! Interpolation weights
+    !aM3=(yM-2.)*(yM-1.)*yM*(yM+1.)/24.0
+    aM3=yM*(2.+yM*(-1.+yM*(-2.+yM)))/24.0
+    !aM2=(2.-yM)*(yM-1.)*yM*(yM+2.)/6.0
+    aM2=yM*(-4.+yM*(4.+yM*(1.-yM)))/6.0
+    !aM =(yM-2.)*(yM-1.)*(yM+1.)*(yM+2.)/4.0
+    aM =(4.+(yM**2)*(-5.+yM**2))/4.0
+    !a0 =((2.-yM)*yM*(yM+1.)*(yM+2.)/6.0) + ((yM-1.)*yM*(yM+1.)*(yM+2.)/24.0)
+    a0 = 1. - (aM3+aM2+aM)
+
+    !bM2=(y0-2.)*(y0-1.)*y0*(y0+1.)/24.0
+    bM2=y0*(2.+y0*(-1.+y0*(-2.+y0)))/24.0
+    !bM =(2.-y0)*(y0-1.)*y0*(y0+2.)/6.0
+    bM =y0*(-4.+y0*(4.+y0*(1.-y0)))/6.0
+    !bP =((y0+1)-1.)*(y0+1)*((y0+1)+1.)*((y0+1)+2.)/24.0
+    bP =y0*(6.+y0*(11+y0*(6+y0)))/24.0
+    !b0 =((y0-2.)*(y0-1.)*(y0+1.)*(y0+2.)/4.0) + ((2.-y0)*y0*(y0+1.)*(y0+2.)/6.0) &
+    !        & + ((y0-1.)*y0*(y0+1.)*(y0+2.)/24.0) - bP
+    b0 = 1. - (bM2+bM+bP)
+
+    !cM =((yP-1.)-2.)*((yP-1.)-1.)*(yP-1.)*((yP-1.)+1.)/24.0
+    cM =yP*(-6.+yP*(11.+yP*(-6.+yP)))/24.0
+    !cP =(2.-yP)*yP*(yP+1.)*(yP+2.)/6.0
+    cP =yP*(4.+yP*(4.-yP*(1.+yP)))/6.0
+    !cP2=(yP-1.)*yP*(yP+1.)*(yP+2.)/24.0
+    cP2=yP*(-2.+yP*(-1.+yP*(2.+yP)))/24.0
+    !c0 =((yP-2.)*(yP-1.)*yP*(yP+1.)/24.0)+((2.-yP)*(yP-1.)*yP*(yP+2.)/6.0) &
+    !        & + ((yP-2.)*(yP-1.)*(yP+1.)*(yP+2.)/4.0) - cM
+    c0 = 1. - (cM+cP+cP2)
+
+    !eP =(yP2-2.)*(yP2-1.)*(yP2+1.)*(yP2+2.)/4.0
+    eP =1.+((yP2**2)*(-5+yP2**2)/4.0)
+    !eP2=(2.-yP2)*yP2*(yP2+1.)*(yP2+2.)/6.0
+    eP2=yP2*(4.+yP2*(4.-yP2*(1+yP2)))/6.0
+    !eP3=(yP2-1.)*yP2*(yP2+1.)*(yP2+2.)/24.0
+    eP3=yP2*(-2.+yP2*(-1.+yP2*(2+yP2)))/24.0
+    !e0 =((yP2-2.)*(yP2-1.)*yP2*(yP2+1.)/24.0) + ((2.-yP2)*(yP2-1.)*yP2*(yP2+2.)/6.0)
+    e0 = 1. - (eP+eP2+eP3)
+
+    ! remeshing
+    buffer(j0-3) = buffer(j0-3)   +aM3*scaM
+    buffer(j0-2) = buffer(j0-2)   +aM2*scaM +bM2*sca
+    buffer(j0-1) = buffer(j0-1)   + aM*scaM + bM*sca  + cM*scaP
+    buffer(j0)   = buffer(j0)     + a0*scaM + b0*sca  + c0*scaP + e0*scaP2
+    buffer(j0+1) = buffer(j0+1)             + bP*sca  + cP*scaP + eP*scaP2
+    buffer(j0+2) = buffer(j0+2)                       +cP2*scaP +eP2*scaP2
+    buffer(j0+3) = buffer(j0+3)                                 +eP3*scaP2
+
+end subroutine AC_remesh_O4_tag_CL
+
+
+!> Corrected remeshing formula of order 3 for the transition from a left block to a centered
+!! block with a different index (tagged particles). Use it for the lambda 4 corrected scheme.
+!! - version for an array of reals.
+!!    @param[in]       posM_ad = adimensionned position of the first particle
+!!    @param[in]       scaM    = scalar advected by the first particle
+!!    @param[in]       pos_adim= adimensionned position of the second particle (the last of the first block)
+!!    @param[in]       sca     = scalar advected by this particle
+!!    @param[in]       posP_ad = adimensionned position of the third particle (which is the first of the second block)
+!!    @param[in]       scaP    = scalar advected by this particle
+!!    @param[in]       posP2_ad= adimensionned position of the fourth (and last) particle
+!!    @param[in]       scaP2   = scalar advected by this particle
+!!    @param[in,out]   buffer  = temporarily remeshed scalar field
+!! @details
+!!    Remeshing formula devoted to tagged particles.
+!!    The particle group sent as argument is composed of a block end and of the
+!!    beginning of the next block. The first particles belong to a left block
+!!    and the last ones to a centered one. The blocks have different indices (tagged
+!!    particles) and we have to use the corrected formula.
+subroutine AC_remesh_O4_tag_LC(posM_ad, scaM, pos_adim, sca, posP_ad, scaP, posP2_ad, scaP2, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    real(WP), intent(in)                                        :: pos_adim, sca, posP_ad, scaP
+    real(WP), intent(in)                                        :: posM_ad, scaM, posP2_ad, scaP2
+    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: jM, j0, jP, jP2          ! indices of the nearest mesh points
+                                            ! (they depend on the block type)
+    real(WP)    :: aM3, aM2, aM, a0, aP,aP2 ! interpolation weights for the particles
+    real(WP)    :: bM2, bM, b0, bP, bP2,bP3 ! interpolation weights for the particles
+    real(WP)    :: cM, c0, cP, cP2, cP3,cP4 ! interpolation weights for the particles
+    real(WP)    :: e0, eP, eP2, eP3,eP4,ep5 ! interpolation weights for the particles
+    real(WP)    :: yM, y0, yP, yP2          ! adimensionned distances to mesh points for each particle
+
+
+    ! Indice of mesh point used in order to remesh
+    jM = floor(posM_ad)
+    j0 = floor(pos_adim)
+    jP = nint(posP_ad)
+    jP2= nint(posP2_ad)
+
+    ! Distance to mesh point
+    yM = (posM_ad  - real(jM, WP))
+    y0 = (pos_adim - real(j0, WP))
+    yP = (posP_ad  - real(jP, WP))
+    yP2= (posP2_ad - real(jP2, WP))
+
+    ! Interpolation weights
+    !aM3=(yM-2.)*(yM-1.)*yM*(yM+1.)/24.0
+    aM3=yM*(2.+yM*(-1.+yM*(-2.+yM)))/24.0
+    !aM2=(2.-yM)*(yM-1.)*yM*(yM+2.)/6.0
+    aM2 =yM*(-4.+yM*(4.+yM*(1.-yM)))/6.0
+    !aM =(yM-2.)*(yM-1.)*(yM+1.)*(yM+2.)/4.0
+    aM =(4.+(yM**2)*(-5.+yM**2))/4.0
+    !a0 =((2.-yM)*yM*(yM+1.)*(yM+2.)/6.0)
+    a0 =yM*(4+yM*(4-yM*(1.+yM)))/6.0
+    !aP2=(((yM-1.)-1.)*(yM-1.)*((yM-1.)+1.)*((yM-1.)+2.)/24.0)
+    !aP2=yM*(yM-2.)*(yM-1.)*(yM+1.)/24.0
+    aP2=aM3
+    !aP =((yM-1.)*yM*(yM+1.)*(yM+2.)/24.0) - aP2
+    !aP = 1.0 - (aM3+aM2+aM+a0+aP2)
+    aP = 1.0 - (2.*aM3+aM2+aM+a0)
+
+    !bM2=(y0-2.)*(y0-1.)*y0*(y0+1.)/24.0
+    bM2=y0*(2.+y0*(-1.+y0*(-2.+y0)))/24.0
+    !bM =(2.-y0)*(y0-1.)*y0*(y0+2.)/6.0
+    bM =y0*(-4.+y0*(4.+y0*(1.-y0)))/6.0
+    !b0 =(y0-2.)*(y0-1.)*(y0+1.)*(y0+2.)/4.0
+    b0 =(4.+(y0**2)*(-5.+y0**2))/4.0
+    !bP2=(2.-(y0-1.))*(y0-1.)*((y0-1.)+1.)*((y0-1.)+2.)/6.0
+    !bP2=y0*(3.-y0)*(y0-1.)*(y0+1.)/6.0
+    bP2=y0*(-3.+y0*(1.+y0*(3.-y0)))/6.0
+    !bP3=((y0-1.)-1.)*(y0-1.)*((y0-1.)+1.)*((y0-1.)+2.)/24.0
+    !bP3=y0*(y0-2.)*(y0-1.)*(y0+1.)/24.0
+    bP3 = bM2
+    !bP =(2.-y0)*y0*(y0+1.)*(y0+2.)/6.0 + ((y0-1.)*y0*(y0+1.)*(y0+2.)/24.0) &
+    !       & - (bP2 + bP3)
+    !bP = 1.0 - (bM2 + bM + b0 + bP2 + bP3)
+    bP = 1.0 - (2*bM2 + bM + b0 + bP2)
+
+    !cM =((yP+1)-2.)*((yP+1)-1.)*(yP+1)*((yP+1)+1.)/24.0
+    cM =(yP-1.)*yP*(yP+1)*(yP+2.)/24.0
+    !cM =yP*(-2.+yP*(-1.+yP*(2.+yP)))/24.0
+    !c0 =(2.-(yP+1))*((yP+1)-1.)*(yP+1)*((yP+1)+2.)/6.0
+    !c0 =(1.-yP)*yP*(yP+1)*(yP+3.)/6.0
+    c0 =yP*(3.+yP*(1.-yP*(3.+yP)))/6.0
+    !cP2=(yP-2.)*(yP-1.)*(yP+1.)*(yP+2.)/4.0
+    cP2=(4.+(yP**2)*(-5.+yP**2))/4.0
+    !cP3=(2.-yP)*yP*(yP+1.)*(yP+2.)/6.0
+    cP3=yP*(4+yP*(4-yP*(1.+yP)))/6.0
+    !cP4=(yP-1.)*yP*(yP+1.)*(yP+2.)/24.0
+    cP4=cM
+    !cP =(yP-2.)*(yP-1.)*yP*(yP+1.)/24.0 + ((2.-yP)*(yP-1.)*yP*(yP+2.)/6.0) &
+    !        & - (cM + c0)
+    cP = 1.0 - (cM+c0+cP2+cP3+cP4)
+
+    !e0 =((yP2+1)-2.)*((yP2+1)-1.)*(yP2+1)*((yP2+1)+1.)/24.0
+    !e0 =(yP2-1.)*yP2*(yP2+1)*(yP2+2.)/24.0
+    e0 =yP2*(-2.+yP2*(-1.+yP2*(2.+yP2)))/24.0
+    !eP2=(2.-yP2)*(yP2-1.)*yP2*(yP2+2.)/6.0
+    eP2=yP2*(-4.+yP2*(4.+yP2*(1.-yP2)))/6.0
+    !eP3=(yP2-2.)*(yP2-1.)*(yP2+1.)*(yP2+2.)/4.0
+    eP3=(4.+(yP2**2)*(-5.+yP2**2))/4.0
+    !eP4=(2.-yP2)*yP2*(yP2+1.)*(yP2+2.)/6.0
+    eP4=yP2*(4+yP2*(4-yP2*(1.+yP2)))/6.0
+    !eP5=(yP2-1.)*yP2*(yP2+1.)*(yP2+2.)/24.0
+    eP5=e0
+    !eP =((yP2-2.)*(yP2-1.)*yP2*(yP2+1.)/24.0) - e0
+    eP = 1.0 - (e0+eP2+eP3+eP4+eP5)
+
+    ! remeshing
+    buffer(j0-3) = buffer(j0-3)   +aM3*scaM
+    buffer(j0-2) = buffer(j0-2)   +aM2*scaM +bM2*sca
+    buffer(j0-1) = buffer(j0-1)   + aM*scaM + bM*sca  + cM*scaP
+    buffer(j0)   = buffer(j0)     + a0*scaM + b0*sca  + c0*scaP + e0*scaP2
+    buffer(j0+1) = buffer(j0+1)   + aP*scaM + bP*sca  + cP*scaP + eP*scaP2
+    buffer(j0+2) = buffer(j0+2)   +aP2*scaM +bP2*sca  +cP2*scaP +eP2*scaP2
+    buffer(j0+3) = buffer(j0+3)             +bP3*sca  +cP3*scaP +eP3*scaP2
+    buffer(j0+4) = buffer(j0+4)                       +cP4*scaP +eP4*scaP2
+    buffer(j0+5) = buffer(j0+5)                                 +eP5*scaP2
+
+end subroutine AC_remesh_O4_tag_LC
+
+
+!> M'6 remeshing formula - array version.
+!!      @param[in]       pos_adim= adimensionned particle position
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporarily remeshed scalar field
+subroutine AC_remesh_Mprime6(pos_adim, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    !Input/Output
+    real(WP), intent(in)                                        :: pos_adim, sca
+    real(WP), dimension(send_j_min:send_j_max), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: j0                       ! index of the nearest mesh point
+    real(WP)    :: bM, bM2, b0, bP, bP2, bP3! interpolation weights for the particles
+    real(WP)    :: y0                       ! adimensionned distance to mesh points
+
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! Interpolation weights
+    !bM2 =-(((y0+2.)-2)*(5.*(y0+2.)-8.)*((y0+2.)-3.)**3)/24.
+    bM2 = y0*(2. + y0*(-1. + y0*(-9. + (13. - 5.*y0)*y0)))/24.
+    !bM  =(y0+1.-1.)*(y0+1.-2.)*(25.*(y0+1.)**3-114.*(y0+1.)**2+153.*(y0+1.)-48.)/24.
+    bM = y0*(-16. + y0*(16. + y0*(39. + y0*(-64. + 25.*y0))))/24.
+    !bP  =-((1.-y0)-1.)*(25.*(1.-y0)**4-38.*(1.-y0)**3-3.*(1.-y0)**2+12.*(1.-y0)+12)/12.
+    bP = ( y0*(8. + y0*(8. + y0*(33. + y0*(-62. + 25.*y0)))))/12.
+    !bP2 = ((2.-y0)-1.)*((2.-y0)-2.)*(25.*(2.-y0)**3-114.*(2.-y0)**2+153.*(2.-y0)-48.)/24.
+    bP2 = (y0*(-2. + y0*(-1. + y0*(-33. + (61. - 25.*y0)*y0))))/24.
+    !bP3 =-(((3.-y0)-2)*(5.*(3.-y0)-8.)*((3.-y0)-3.)**3)/24.
+    bP3 = (y0**3)*(7. + y0*(5.*y0 - 12.))/24.
+    !b0  =-(y0-1.)*(25.*y0**4-38.*y0**3-3.*y0**2+12.*y0+12)/12.
+    !b0 = (12. + y0**2*(-15. + y0*(-35. + (63. - 25.*y0)*y0)))/12.
+    b0 = 1. - (bM2+bM+bP+bP2+bP3)
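+    ! b0 closes the partition of unity (the commented polynomial above is its
+    ! direct form), enforcing exact conservation of the remeshed quantity.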
+
+    ! remeshing
+    buffer(j0-2) = buffer(j0-2)   + sca*bM2
+    buffer(j0-1) = buffer(j0-1)   + sca*bM
+    buffer(j0)   = buffer(j0)     + sca*b0
+    buffer(j0+1) = buffer(j0+1)   + sca*bP
+    buffer(j0+2) = buffer(j0+2)   + sca*bP2
+    buffer(j0+3) = buffer(j0+3)   + sca*bP3
+
+end subroutine AC_remesh_Mprime6
+
+end module advec_remeshing_line
diff --git a/HySoP/src/scalesReduced/particles/advec_line/advec_remesh_line.F90 b/HySoP/src/scalesReduced/particles/advec_line/advec_remesh_line.F90
new file mode 100644
index 0000000000000000000000000000000000000000..f79d365962b005c0874480555b78a03a1443e765
--- /dev/null
+++ b/HySoP/src/scalesReduced/particles/advec_line/advec_remesh_line.F90
@@ -0,0 +1,1310 @@
+!USEFORTEST advec
+!> @addtogroup part
+
+!------------------------------------------------------------------------------
+!
+! MODULE: advec_remesh_line
+!
+!
+! DESCRIPTION:
+!> The module advec_remesh_line contains different semi-optimized remeshing
+!! procedures. They are here for debugging/test/comparison purposes and will
+!! be deleted in the "not too far" future (after adding an optimized M'6, with
+!! more validation and benchmarks performed).
+!
+!! The module "test_advec" can be used in order to validate the procedures
+!! embedded in this module.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advec_remesh_line
+
+    use precision_tools
+    use advec_abstract_proc
+    use advec_correction
+    use mpi, only: MPI_INTEGER, MPI_ANY_SOURCE
+    implicit none
+
+    ! ===== Public procedures =====
+    !----- (corrected) lambda 2 Remeshing method -----
+    public                  :: Xremesh_O2       ! order 2
+    public                  :: Yremesh_O2       ! order 2
+    public                  :: Zremesh_O2       ! order 2
+    !----- (corrected) lambda 4 Remeshing method -----
+    public                  :: Xremesh_O4       ! order 4
+    public                  :: Yremesh_O4       ! order 4
+    public                  :: Zremesh_O4       ! order 4
+    !----- M'6 remeshing method -----
+    public                  :: Xremesh_Mprime6
+    public                  :: Yremesh_Mprime6
+    public                  :: Zremesh_Mprime6
+
+
+    ! ===== Private variable ====
+
+contains
+
+! #####################################################################################
+! #####                                                                           #####
+! #####                          Public procedure                                 #####
+! #####                                                                           #####
+! #####################################################################################
+
+! ============================================================================
+! ====================   Remeshing along X subroutines    ====================
+! ============================================================================
+
+!> remeshing along X with an order 2 method, corrected to allow large CFL numbers - group version
+!!    @param[in]        direction   = current direction
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        gs          = size of groups (along X direction)
+!!    @param[in]        p_pos_adim  = adimensionned particles position
+!!    @param[in]        p_V         = particles velocity (needed for tag and type)
+!!    @param[in]        j,k         = indices of the current line (y-coordinate and z-coordinate)
+!!    @param[in]        dt          = time step (needed for tag and type)
+!!    @param[in,out]    scal        = scalar field to advect
+subroutine Xremesh_O2(direction, ind_group, gs, p_pos_adim, p_V, j, k, scal, dt)
+
+    use advec_common            ! Some procedures common to advection along all directions
+    use advec_common_line       ! Some procedures common to advection along all directions
+    use advec_remeshing_line    ! Remeshing formula
+    use advec_variables         ! contains info about solver parameters and others.
+    use cart_topology     ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    integer, intent(in)                         :: direction
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: j, k
+    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! adimensionned particles position
+    real(WP), dimension(:,:,:), intent(in)      :: p_V          ! particles velocity
+    real(WP), dimension(:,:,:), intent(inout)   :: scal
+    real(WP), intent(in)                        :: dt
+    ! Other local variables
+    ! To type and tag particles
+    logical, dimension(bl_nb(direction)+1,gs(1),gs(2))  :: bl_type      ! is the particle block a centered one or a left one?
+    logical, dimension(bl_nb(direction),gs(1),gs(2))    :: bl_tag       ! flags the tagged particles
+    ! To compute required communications
+    integer, dimension(gs(1), gs(2))        :: send_group_min     ! minimal index of the mesh points involved in remeshing my particles
+    integer, dimension(gs(1), gs(2))        :: send_group_max     ! maximal index of the mesh points involved in remeshing my particles
+    ! Variable used to remesh particles in a buffer
+    real(WP),dimension(:),allocatable, target:: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain
+                                                            ! sorted by receivers and not by coordinate.
+    integer, dimension(2,gs(1),gs(2))       :: rece_proc    ! minimal and maximal gap between my coordinate and the ones of the
+                                                            ! processes from which I will receive data
+    integer, dimension(gs(1),gs(2))         :: proc_min     ! smallest gap between me and the processes to which I send data
+    integer, dimension(gs(1),gs(2))         :: proc_max     ! largest gap between me and the processes to which I send data
+
+    integer                                 :: i1, i2       ! indices of a line inside the group
+
+    ! -- Pre-Remeshing: Determine blocks type and tag particles --
+    call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
+
+    !  -- Compute ranges --
+    where (bl_type(1,:,:))
+        ! First particle is a centered one
+        send_group_min = nint(p_pos_adim(1,:,:))-1
+    elsewhere
+        ! First particle is a left one
+        send_group_min = floor(p_pos_adim(1,:,:))-1
+    end where
+    where (bl_type(mesh_sc%N_proc(direction)/bl_size +1,:,:))
+        ! Last particle is a centered one
+        send_group_max = nint(p_pos_adim(mesh_sc%N_proc(direction),:,:))+1
+    elsewhere
+        ! Last particle is a left one
+        send_group_max = floor(p_pos_adim(mesh_sc%N_proc(direction),:,:))+1
+    end where
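+    ! Example (illustrative numbers): for a first particle at dimensionless
+    ! position 5.6, a centered block gives nint(5.6)-1 = 5 while a left one
+    ! gives floor(5.6)-1 = 4 - the remeshing support reaches one point further
+    ! left when the block is left-shifted.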
+
+    ! -- Determine the required communications: who will communicate with whom? (i.e. compute senders and receivers) --
+    call AC_obtain_senders_group(direction, gs, ind_group, &
+      & send_group_min, send_group_max, proc_min, proc_max, rece_proc)
+
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            send_j_min = send_group_min(i1,i2)
+            send_j_max = send_group_max(i1,i2)
+
+            ! -- Allocate buffer for remeshing of local particles --
+            allocate(send_buffer(send_j_min:send_j_max))
+            send_buffer = 0.0_WP
+
+            ! -- Remesh the particles in the buffer --
+            call AC_remesh_lambda2corrected_basic(direction, p_pos_adim(:,i1,i2), scal(:,j+i1-1,k+i2-1), &
+                & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, send_j_max, send_buffer)
+
+            ! -- Send the buffer to the matching processus and update the scalar field --
+            scal(:,j+i1-1,k+i2-1) = 0
+            call AC_bufferToScalar_line(direction, ind_group , send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+                & rece_proc(:,i1,i2), send_buffer, scal(:,j+i1-1,k+i2-1))
+
+            ! Deallocate the buffer
+            deallocate(send_buffer)
+
+        end do
+    end do
+
+end subroutine Xremesh_O2
+
+
+!> Remeshing along X with an order 4 method, corrected to allow large CFL numbers - untagged particles
+!!    @param[in]        direction   = current direction
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        gs          = size of groups (along X direction)
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        p_V         = particles velocity (needed for tag and type)
+!!    @param[in]        j,k         = indices of the current line (y- and z-coordinates)
+!!    @param[in,out]    scal        = scalar field to advect
+!!    @param[in]        dt          = time step (needed for tag and type)
+subroutine Xremesh_O4(direction, ind_group, gs, p_pos_adim, p_V, j,k, scal, dt)
+
+    use advec_common            ! Some procedures common to advection along all directions
+    use advec_common_line       ! Some procedures common to advection along all directions
+    use advec_remeshing_line    ! Remeshing formula
+    use advec_variables         ! contains info about solver parameters and others.
+    use cart_topology     ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    integer, intent(in)                         :: direction
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: j, k
+    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! adimensionned particles position
+    real(WP), dimension(:,:,:), intent(in)      :: p_V          ! particles velocity
+    real(WP), dimension(:,:,:), intent(inout)   :: scal
+    real(WP), intent(in)                        :: dt
+    ! Other local variables
+    ! To compute required communications
+    integer, dimension(gs(1), gs(2))        :: send_group_min     ! minimal index of the mesh points involved in remeshing my particles
+    integer, dimension(gs(1), gs(2))        :: send_group_max     ! maximal index of the mesh points involved in remeshing my particles
+    ! To type and tag particles
+    logical, dimension(bl_nb(direction)+1,gs(1),gs(2))  :: bl_type      ! is the particle block a centered one or a left one?
+    logical, dimension(bl_nb(direction),gs(1),gs(2))    :: bl_tag       ! flags the tagged particles
+    ! Variables used to remesh particles ...
+        ! ... and to communicate between subdomains. A variable prefixed by "send_" (resp. "rece")
+        ! designates something I send (resp. I receive).
+    real(WP),dimension(:),allocatable   :: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain
+    integer, dimension(2,gs(1),gs(2))   :: rece_proc    ! minimal and maximal gap between my coordinate and the ones of the
+                                                        ! processes from which I will receive data
+    integer, dimension(gs(1),gs(2))     :: proc_min     ! smallest gap between me and the processes to which I send data
+    integer, dimension(gs(1),gs(2))     :: proc_max     ! largest gap between me and the processes to which I send data
+    integer                             :: i1, i2       ! indices of a line inside the group
+
+    ! -- Pre-Remeshing: Determine blocks type and tag particles --
+    call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
+
+    !  -- Compute ranges --
+    where (bl_type(1,:,:))
+        ! First particle is a centered one
+        send_group_min = nint(p_pos_adim(1,:,:))-2
+    elsewhere
+        ! First particle is a left one
+        send_group_min = floor(p_pos_adim(1,:,:))-2
+    end where
+    where (bl_type(mesh_sc%N_proc(direction)/bl_size +1,:,:))
+        ! Last particle is a centered one
+        send_group_max = nint(p_pos_adim(mesh_sc%N_proc(direction),:,:))+2
+    elsewhere
+        ! Last particle is a left one
+        send_group_max = floor(p_pos_adim(mesh_sc%N_proc(direction),:,:))+2
+    end where
+
+    ! -- Determine the required communications: who will communicate with whom? (i.e. compute senders and receivers) --
+    call AC_obtain_senders_com(direction, gs, ind_group, send_group_min, &
+      & send_group_max, proc_min, proc_max, rece_proc)
+
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            send_j_min = send_group_min(i1,i2)
+            send_j_max = send_group_max(i1,i2)
+
+            ! -- Allocate buffer for remeshing of local particles --
+            allocate(send_buffer(send_j_min:send_j_max))
+            send_buffer = 0.0_WP
+
+            ! -- Remesh the particles in the buffer --
+            call AC_remesh_lambda4corrected_basic(direction, p_pos_adim(:,i1,i2), scal(:,j+i1-1,k+i2-1), &
+                & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, send_j_max, send_buffer)
+
+            ! -- Send the buffer to the matching processus and update the scalar field --
+            scal(:,j+i1-1,k+i2-1) = 0
+            call AC_bufferToScalar_line(direction, ind_group, send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+                & rece_proc(:,i1,i2), send_buffer, scal(:,j+i1-1,k+i2-1))
+
+            ! Deallocate the buffer
+            deallocate(send_buffer)
+
+        end do
+    end do
+
+end subroutine Xremesh_O4
+
+
+!> Remeshing along X with the M'6 formula - no tag nor correction for large time steps.
+!!    @param[in]        direction   = current direction
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        gs          = size of groups (along X direction)
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        p_V         = particles velocity (only to match the interface
+!!                                      of the other remeshing procedures)
+!!    @param[in]        j,k         = indices of the current line (y- and z-coordinates)
+!!    @param[in]        dt          = time step (only to match the interface
+!!                                      of the other remeshing procedures)
+!!    @param[in,out]    scal        = scalar field to advect
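+!! @details
+!!    The M'6 kernel has a 6-point support, which fixes the remeshing interval
+!!    computed below: a particle at dimensionless position x contributes to the
+!!    mesh points floor(x)-2 to floor(x)+3. For example (illustrative number),
+!!    x = 7.3 spreads over the points 5 to 10.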
+subroutine Xremesh_Mprime6(direction, ind_group, gs, p_pos_adim, p_V, j,k,scal, dt)
+
+    use advec_common_line       ! Some procedures common to advection along all directions
+    use advec_remeshing_line    ! Remeshing formula
+    use advec_variables         ! contains info about solver parameters and others.
+    use cart_topology     ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    integer, intent(in)                         :: direction
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: j, k
+    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! adimensionned particles position
+    real(WP), dimension(:,:,:), intent(in)      :: p_V          ! particles velocity
+    real(WP), dimension(:,:,:), intent(inout)   :: scal
+    real(WP), intent(in)                        :: dt
+    ! Other local variables
+    ! To compute required communications
+    integer, dimension(gs(1), gs(2))        :: send_group_min     ! minimal index of the mesh points involved in remeshing my particles
+    integer, dimension(gs(1), gs(2))        :: send_group_max     ! maximal index of the mesh points involved in remeshing my particles
+    ! Variables used to remesh particles ...
+        ! ... and to communicate between subdomains. A variable prefixed by "send_" (resp. "rece")
+        ! designates something I send (resp. I receive).
+    real(WP),dimension(:),allocatable   :: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain
+    integer, dimension(2,gs(1),gs(2))   :: rece_proc    ! minimal and maximal gap between my coordinate and the ones of the
+                                                        ! processes from which I will receive data
+    integer, dimension(gs(1),gs(2))     :: proc_min     ! smallest gap between me and the processes to which I send data
+    integer, dimension(gs(1),gs(2))     :: proc_max     ! largest gap between me and the processes to which I send data
+    integer                             :: i1, i2       ! indices of a line inside the group
+    integer                             :: i            ! index of the current particle
+
+    !  -- Compute the remeshing domain --
+    send_group_min = floor(p_pos_adim(1,:,:)-2)
+    send_group_max = floor(p_pos_adim(mesh_sc%N_proc(direction),:,:)+3)
+
+    ! -- Determine the required communications: who will communicate with whom? (i.e. compute senders and receivers) --
+    call AC_obtain_senders_com(direction, gs, ind_group, send_group_min, &
+      & send_group_max, proc_min, proc_max, rece_proc)
+
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            send_j_min = send_group_min(i1,i2)
+            send_j_max = send_group_max(i1,i2)
+
+            ! -- Allocate buffer for remeshing of local particles --
+            allocate(send_buffer(send_j_min:send_j_max))
+            send_buffer = 0.0_WP
+
+            ! -- Remesh the particles in the buffer --
+            do i = 1, mesh_sc%N_proc(direction), 1
+                call AC_remesh_Mprime6(p_pos_adim(i,i1,i2),scal(i,j+i1-1,k+i2-1), send_buffer)
+            end do
+
+            ! -- Send the buffer to the matching processus and update the scalar field --
+            scal(:,j+i1-1,k+i2-1) = 0
+            call AC_bufferToScalar_line(direction, ind_group, send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+                & rece_proc(:,i1,i2), send_buffer, scal(:,j+i1-1,k+i2-1))
+
+            ! Deallocate the buffer
+            deallocate(send_buffer)
+
+        end do
+    end do
+
+end subroutine Xremesh_Mprime6
+
+
+! ============================================================================
+! ====================   Remeshing along Y subroutines    ====================
+! ============================================================================
+
+!> Remeshing along Y with an order 2 method, corrected to allow large CFL numbers - group version
+!!    @param[in]        direction   = current direction
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        gs          = size of groups (along Y direction)
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        p_V         = particles velocity (needed for tag and type)
+!!    @param[in]        i,k         = indices of the current line (x- and z-coordinates)
+!!    @param[in]        dt          = time step (needed for tag and type)
+!!    @param[in,out]    scal        = scalar field to advect
+subroutine Yremesh_O2(direction, ind_group, gs, p_pos_adim, p_V, i, k, scal, dt)
+
+    use advec_common            ! Some procedures common to advection along all directions
+    use advec_common_line       ! Some procedures common to advection along all directions
+    use advec_remeshing_line    ! Remeshing formula
+    use advec_variables         ! contains info about solver parameters and others.
+    use cart_topology     ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    integer, intent(in)                         :: direction
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: i, k
+    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! adimensionned particles position
+    real(WP), dimension(:,:,:), intent(in)      :: p_V          ! particles velocity
+    real(WP), dimension(:,:,:), intent(inout)   :: scal
+    real(WP), intent(in)                        :: dt
+    ! Other local variables
+    ! To type and tag particles
+    logical, dimension(bl_nb(direction)+1,gs(1),gs(2))  :: bl_type      ! is the particle block a centered one or a left one?
+    logical, dimension(bl_nb(direction),gs(1),gs(2))    :: bl_tag       ! flags the tagged particles
+    ! To compute required communications
+    integer, dimension(gs(1), gs(2))        :: send_group_min     ! minimal index of the mesh points involved in remeshing my particles
+    integer, dimension(gs(1), gs(2))        :: send_group_max     ! maximal index of the mesh points involved in remeshing my particles
+    ! Variable used to remesh particles in a buffer
+    real(WP),dimension(:),allocatable   :: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain
+    integer, dimension(2,gs(1),gs(2))   :: rece_proc    ! minimal and maximal gap between my coordinate and the ones of the
+                                                        ! processes from which I will receive data
+    integer, dimension(gs(1),gs(2))     :: proc_min     ! smallest gap between me and the processes to which I send data
+    integer, dimension(gs(1),gs(2))     :: proc_max     ! largest gap between me and the processes to which I send data
+
+    integer                             :: i1, i2       ! indices of a line inside the group
+
+    ! -- Pre-Remeshing: Determine blocks type and tag particles --
+    call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
+
+    !  -- Compute ranges --
+    where (bl_type(1,:,:))
+        ! First particle is a centered one
+        send_group_min = nint(p_pos_adim(1,:,:))-1
+    elsewhere
+        ! First particle is a left one
+        send_group_min = floor(p_pos_adim(1,:,:))-1
+    end where
+    where (bl_type(mesh_sc%N_proc(direction)/bl_size +1,:,:))
+        ! Last particle is a centered one
+        send_group_max = nint(p_pos_adim(mesh_sc%N_proc(direction),:,:))+1
+    elsewhere
+        ! Last particle is a left one
+        send_group_max = floor(p_pos_adim(mesh_sc%N_proc(direction),:,:))+1
+    end where
+
+    ! -- Determine the required communications: who will communicate with whom? (i.e. compute senders and receivers) --
+    call AC_obtain_senders_group(direction, gs, ind_group, send_group_min, &
+      & send_group_max, proc_min, proc_max, rece_proc)
+
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            send_j_min = send_group_min(i1,i2)
+            send_j_max = send_group_max(i1,i2)
+
+            ! -- Allocate buffer for remeshing of local particles --
+            allocate(send_buffer(send_j_min:send_j_max))
+            send_buffer = 0.0_WP
+
+            ! -- Remesh the particles in the buffer --
+            call AC_remesh_lambda2corrected_basic(direction, p_pos_adim(:,i1,i2), scal(i+i1-1,:,k+i2-1), &
+                & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, send_j_max, send_buffer)
+
+            ! -- Send the buffer to the matching processus and update the scalar field --
+            scal(i+i1-1,:,k+i2-1) = 0
+            call AC_bufferToScalar_line(direction, ind_group , send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+                & rece_proc(:,i1,i2), send_buffer, scal(i+i1-1,:,k+i2-1))
+
+            ! Deallocate the buffer
+            deallocate(send_buffer)
+
+        end do
+    end do
+
+end subroutine Yremesh_O2
+
+
+!> Remeshing along Y with an order 4 method, corrected to allow large CFL numbers - untagged particles
+!!    @param[in]        direction   = current direction
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        gs          = size of groups (along Y direction)
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        p_V         = particles velocity (needed for tag and type)
+!!    @param[in]        i,k         = indices of the current line (x- and z-coordinates)
+!!    @param[in,out]    scal        = scalar field to advect
+!!    @param[in]        dt          = time step (needed for tag and type)
+subroutine Yremesh_O4(direction, ind_group, gs, p_pos_adim, p_V, i,k,scal, dt)
+
+    use advec_common            ! Some procedures common to advection along all directions
+    use advec_common_line       ! Some procedures common to advection along all directions
+    use advec_remeshing_line    ! Remeshing formula
+    use advec_variables         ! contains info about solver parameters and others.
+    use cart_topology     ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    integer, intent(in)                         :: direction
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: i, k
+    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! adimensionned particles position
+    real(WP), dimension(:,:,:), intent(in)      :: p_V          ! particles velocity
+    real(WP), dimension(:,:,:), intent(inout)   :: scal
+    real(WP), intent(in)                        :: dt
+    ! Other local variables
+    ! To compute required communications
+    integer, dimension(gs(1), gs(2))        :: send_group_min     ! minimal index of the mesh points involved in remeshing my particles
+    integer, dimension(gs(1), gs(2))        :: send_group_max     ! maximal index of the mesh points involved in remeshing my particles
+    ! To type and tag particles
+    logical, dimension(bl_nb(direction)+1,gs(1),gs(2))  :: bl_type      ! is the particle block a centered one or a left one?
+    logical, dimension(bl_nb(direction),gs(1),gs(2))    :: bl_tag       ! flags the tagged particles
+    ! Variables used to remesh particles ...
+        ! ... and to communicate between subdomains. A variable prefixed by "send_" (resp. "rece")
+        ! designates something I send (resp. I receive).
+    real(WP),dimension(:),allocatable   :: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain
+    integer, dimension(2,gs(1),gs(2))   :: rece_proc    ! minimal and maximal gap between my coordinate and the ones of the
+                                                        ! processes from which I will receive data
+    integer, dimension(gs(1),gs(2))     :: proc_min     ! smallest gap between me and the processes to which I send data
+    integer, dimension(gs(1),gs(2))     :: proc_max     ! largest gap between me and the processes to which I send data
+    integer                             :: i1, i2       ! indices of a line inside the group
+
+    ! -- Pre-Remeshing: Determine blocks type and tag particles --
+    call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
+
+    !  -- Compute ranges --
+    where (bl_type(1,:,:))
+        ! First particle is a centered one
+        send_group_min = nint(p_pos_adim(1,:,:))-2
+    elsewhere
+        ! First particle is a left one
+        send_group_min = floor(p_pos_adim(1,:,:))-2
+    end where
+    where (bl_type(mesh_sc%N_proc(direction)/bl_size +1,:,:))
+        ! Last particle is a centered one
+        send_group_max = nint(p_pos_adim(mesh_sc%N_proc(direction),:,:))+2
+    elsewhere
+        ! Last particle is a left one
+        send_group_max = floor(p_pos_adim(mesh_sc%N_proc(direction),:,:))+2
+    end where
+
+    ! -- Determine the required communications: who will communicate with whom? (i.e. compute senders and receivers) --
+    call AC_obtain_senders_group(direction, gs, ind_group, send_group_min, &
+      & send_group_max, proc_min, proc_max, rece_proc)
+
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            send_j_min = send_group_min(i1,i2)
+            send_j_max = send_group_max(i1,i2)
+
+            ! -- Allocate buffer for remeshing of local particles --
+            allocate(send_buffer(send_j_min:send_j_max))
+            send_buffer = 0.0_WP
+
+            ! -- Remesh the particles in the buffer --
+            call AC_remesh_lambda4corrected_basic(direction, p_pos_adim(:,i1,i2), scal(i+i1-1,:,k+i2-1), &
+                & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, send_j_max, send_buffer)
+
+            ! -- Send the buffer to the matching processus and update the scalar field --
+            scal(i+i1-1,:,k+i2-1) = 0
+            call AC_bufferToScalar_line(direction, ind_group, send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+                & rece_proc(:,i1,i2), send_buffer, scal(i+i1-1,:,k+i2-1))
+
+            ! Deallocate the buffer
+            deallocate(send_buffer)
+
+        end do
+    end do
+
+end subroutine Yremesh_O4
+
+
+!> Remeshing along Y with the M'6 formula - no tag nor correction for large time steps.
+!!    @param[in]        direction   = current direction
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        gs          = size of groups (along Y direction)
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        p_V         = particles velocity (only to match the interface
+!!                                      of the other remeshing procedures)
+!!    @param[in]        i,k         = indices of the current line (x- and z-coordinates)
+!!    @param[in,out]    scal        = scalar field to advect
+!!    @param[in]        dt          = time step (only to match the interface
+!!                                      of the other remeshing procedures)
+subroutine Yremesh_Mprime6(direction, ind_group, gs, p_pos_adim, p_V, i,k,scal, dt)
+
+    use advec_common_line       ! Some procedures common to advection along all directions
+    use advec_remeshing_line    ! Remeshing formula
+    use advec_variables         ! contains info about solver parameters and others.
+    use cart_topology     ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    integer, intent(in)                         :: direction
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: i, k
+    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! adimensionned particles position
+    real(WP), dimension(:,:,:), intent(in)      :: p_V          ! particles velocity
+    real(WP), dimension(:,:,:), intent(inout)   :: scal
+    real(WP), intent(in)                        :: dt
+    ! Other local variables
+    ! To compute required communications
+    integer, dimension(gs(1), gs(2))        :: send_group_min     ! minimal index of the mesh points involved in remeshing my particles
+    integer, dimension(gs(1), gs(2))        :: send_group_max     ! maximal index of the mesh points involved in remeshing my particles
+    ! Variables used to remesh particles ...
+        ! ... and to communicate between subdomains. A variable prefixed by "send_" (resp. "rece")
+        ! designates something I send (resp. I receive).
+    real(WP),dimension(:),allocatable   :: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain
+    integer, dimension(2,gs(1),gs(2))   :: rece_proc    ! minimal and maximal gap between my coordinate and the ones of the
+                                                        ! processes from which I will receive data
+    integer, dimension(gs(1),gs(2))     :: proc_min     ! smallest gap between me and the processes to which I send data
+    integer, dimension(gs(1),gs(2))     :: proc_max     ! largest gap between me and the processes to which I send data
+    integer                             :: i1, i2       ! indices of a line inside the group
+    integer                             :: ind_p        ! index of the current particle
+
+    !  -- Compute the remeshing domain --
+    send_group_min = floor(p_pos_adim(1,:,:)-2)
+    send_group_max = floor(p_pos_adim(mesh_sc%N_proc(direction),:,:)+3)
+
+    ! -- Determine the required communications: who will communicate with whom? (i.e. compute senders and receivers) --
+    call AC_obtain_senders_com(direction, gs, ind_group, send_group_min, &
+      & send_group_max, proc_min, proc_max, rece_proc)
+
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            send_j_min = send_group_min(i1,i2)
+            send_j_max = send_group_max(i1,i2)
+
+            !  -- Allocate and initialize the buffer --
+            allocate(send_buffer(send_j_min:send_j_max))
+            send_buffer = 0.0_WP
+
+            ! -- Remesh the particles in the buffer --
+            do ind_p = 1, mesh_sc%N_proc(direction), 1
+                call AC_remesh_Mprime6(p_pos_adim(ind_p,i1,i2),scal(i+i1-1,ind_p,k+i2-1), send_buffer)
+            end do
+
+            ! -- Send the buffer to the matching processus and update the scalar field --
+            scal(i+i1-1,:,k+i2-1) = 0
+            call AC_bufferToScalar_line(direction, ind_group, send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+                & rece_proc(:,i1,i2), send_buffer, scal(i+i1-1,:,k+i2-1))
+
+            ! Deallocate the buffer
+            deallocate(send_buffer)
+
+        end do
+    end do
+
+end subroutine Yremesh_Mprime6
+
+
+! ============================================================================
+! ====================   Remeshing along Z subroutines    ====================
+! ============================================================================
+
+!> Remeshing along Z with an order 2 method, corrected to allow large CFL numbers - group version
+!!    @param[in]        direction   = current direction
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        gs          = size of groups (along Z direction)
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        p_V         = particles velocity (needed for tag and type)
+!!    @param[in]        i,j         = indices of the current line (x- and y-coordinates)
+!!    @param[in,out]    scal        = scalar field to advect
+!!    @param[in]        dt          = time step (needed for tag and type)
+subroutine Zremesh_O2(direction, ind_group, gs, p_pos_adim, p_V,i,j,scal, dt)
+
+    use advec_common            ! Some procedures common to advection along all directions
+    use advec_common_line       ! Some procedures common to advection along all directions
+    use advec_remeshing_line    ! Remeshing formula
+    use advec_variables         ! contains info about solver parameters and others.
+    use cart_topology     ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    integer, intent(in)                         :: direction
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: i, j
+    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! adimensionned particles position
+    real(WP), dimension(:,:,:), intent(in)      :: p_V          ! particles velocity
+    real(WP), dimension(:,:,:), intent(inout)   :: scal
+    real(WP), intent(in)                        :: dt
+    ! Other local variables
+    ! To compute required communications
+    integer, dimension(gs(1), gs(2))        :: send_group_min     ! minimal index of the mesh points involved in remeshing my particles
+    integer, dimension(gs(1), gs(2))        :: send_group_max     ! maximal index of the mesh points involved in remeshing my particles
+    ! To type and tag particles
+    logical, dimension(bl_nb(direction)+1,gs(1),gs(2))  :: bl_type      ! is the particle block a centered one or a left one?
+    logical, dimension(bl_nb(direction),gs(1),gs(2))    :: bl_tag       ! flags the tagged particles
+    ! Variable used to remesh particles in a buffer
+    real(WP),dimension(:),allocatable   :: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain
+    integer, dimension(2,gs(1),gs(2))   :: rece_proc    ! minimal and maximal gap between my coordinate and the ones of the
+                                                        ! processes from which I will receive data
+    integer, dimension(gs(1),gs(2))     :: proc_min     ! smallest gap between me and the processes to which I send data
+    integer, dimension(gs(1),gs(2))     :: proc_max     ! largest gap between me and the processes to which I send data
+
+    integer                             :: i1, i2       ! indices of a line inside the group
+
+    ! -- Pre-Remeshing: Determine blocks type and tag particles --
+    call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
+
+    !  -- Compute ranges --
+    where (bl_type(1,:,:))
+        ! First particle is a centered one
+        send_group_min = nint(p_pos_adim(1,:,:))-1
+    elsewhere
+        ! First particle is a left one
+        send_group_min = floor(p_pos_adim(1,:,:))-1
+    end where
+    where (bl_type(mesh_sc%N_proc(direction)/bl_size +1,:,:))
+        ! Last particle is a centered one
+        send_group_max = nint(p_pos_adim(mesh_sc%N_proc(direction),:,:))+1
+    elsewhere
+        ! Last particle is a left one
+        send_group_max = floor(p_pos_adim(mesh_sc%N_proc(direction),:,:))+1
+    end where
+
+    ! -- Determine the required communications: who will communicate with whom? (i.e. compute senders and receivers) --
+    call AC_obtain_senders_group(direction, gs, ind_group, send_group_min, &
+      & send_group_max, proc_min, proc_max, rece_proc)
+
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            send_j_min = send_group_min(i1,i2)
+            send_j_max = send_group_max(i1,i2)
+
+            ! -- Allocate buffer for remeshing of local particles --
+            allocate(send_buffer(send_j_min:send_j_max))
+            send_buffer = 0.0_WP
+
+            ! -- Remesh the particles in the buffer --
+            call AC_remesh_lambda2corrected_basic(direction, p_pos_adim(:,i1,i2), scal(i+i1-1,j+i2-1,:), &
+                & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, send_j_max, send_buffer)
+
+            ! -- Send the buffer to the matching processus and update the scalar field --
+            scal(i+i1-1,j+i2-1,:) = 0
+            call AC_bufferToScalar_line(direction, ind_group , send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+                & rece_proc(:,i1,i2), send_buffer, scal(i+i1-1,j+i2-1,:))
+
+            ! Deallocate the buffer
+            deallocate(send_buffer)
+
+        end do
+    end do
+
+end subroutine Zremesh_O2
+
+
+!> Remeshing along Z with an order 4 method, corrected to allow large CFL numbers - untagged particles
+!!    @param[in]        direction   = current direction
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        gs          = size of groups (along Z direction)
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        p_V         = particles velocity (needed for tag and type)
+!!    @param[in]        i,j         = indices of the current line (x- and y-coordinates)
+!!    @param[in,out]    scal        = scalar field to advect
+!!    @param[in]        dt          = time step (needed for tag and type)
+subroutine Zremesh_O4(direction, ind_group, gs, p_pos_adim, p_V,i,j,scal, dt)
+
+    use advec_common            ! Some procedures common to advection along all directions
+    use advec_common_line       ! Some procedures common to advection along all directions
+    use advec_remeshing_line    ! Remeshing formula
+    use advec_variables         ! contains info about solver parameters and others.
+    use cart_topology     ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    integer, intent(in)                         :: direction
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: i, j
+    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! adimensionned particles position
+    real(WP), dimension(:,:,:), intent(in)      :: p_V          ! particles velocity
+    real(WP), dimension(:,:,:), intent(inout)   :: scal
+    real(WP), intent(in)                        :: dt
+    ! Other local variables
+    ! To type and tag particles
+    logical, dimension(bl_nb(direction)+1,gs(1),gs(2))  :: bl_type      ! is the particle block a centered one or a left one?
+    logical, dimension(bl_nb(direction),gs(1),gs(2))    :: bl_tag       ! flags the tagged particles
+    ! To compute required communications
+    integer, dimension(gs(1), gs(2))        :: send_group_min     ! minimal index of the mesh points involved in remeshing my particles
+    integer, dimension(gs(1), gs(2))        :: send_group_max     ! maximal index of the mesh points involved in remeshing my particles
+    ! Variables used to remesh particles ...
+        ! ... and to communicate between subdomains. A variable prefixed by "send_" (resp. "rece")
+        ! designates something I send (resp. I receive).
+    real(WP),dimension(:),allocatable   :: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain
+    integer, dimension(2,gs(1),gs(2))   :: rece_proc    ! minimal and maximal gap between my coordinate and the ones of the
+                                                        ! processes from which I will receive data
+    integer, dimension(gs(1),gs(2))     :: proc_min     ! smallest gap between me and the processes to which I send data
+    integer, dimension(gs(1),gs(2))     :: proc_max     ! largest gap between me and the processes to which I send data
+    integer                             :: i1, i2       ! indices of a line inside the group
+
+    ! -- Pre-Remeshing: Determine blocks type and tag particles --
+    call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
+
+    !  -- Compute ranges --
+    where (bl_type(1,:,:))
+        ! First particle is a centered one
+        send_group_min = nint(p_pos_adim(1,:,:))-2
+    elsewhere
+        ! First particle is a left one
+        send_group_min = floor(p_pos_adim(1,:,:))-2
+    end where
+    where (bl_type(mesh_sc%N_proc(direction)/bl_size +1,:,:))
+        ! Last particle is a centered one
+        send_group_max = nint(p_pos_adim(mesh_sc%N_proc(direction),:,:))+2
+    elsewhere
+        ! Last particle is a left one
+        send_group_max = floor(p_pos_adim(mesh_sc%N_proc(direction),:,:))+2
+    end where
+
+    ! -- Determine the required communications: who will communicate with whom? (i.e. compute senders and receivers) --
+    call AC_obtain_senders_group(direction, gs, ind_group, send_group_min, &
+      & send_group_max, proc_min, proc_max, rece_proc)
+
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            send_j_min = send_group_min(i1,i2)
+            send_j_max = send_group_max(i1,i2)
+
+            ! -- Allocate buffer for remeshing of local particles --
+            allocate(send_buffer(send_j_min:send_j_max))
+            send_buffer = 0.0_WP
+
+            ! -- Remesh the particles in the buffer --
+            call AC_remesh_lambda4corrected_basic(direction, p_pos_adim(:,i1,i2), scal(i+i1-1,j+i2-1,:), &
+                & bl_type(:,i1,i2), bl_tag(:,i1,i2), send_j_min, send_j_max, send_buffer)
+
+            ! -- Send the buffer to the matching processus and update the scalar field --
+            scal(i+i1-1,j+i2-1,:) = 0
+            call AC_bufferToScalar_line(direction, ind_group , send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+                & rece_proc(:,i1,i2), send_buffer, scal(i+i1-1,j+i2-1,:))
+
+            ! Deallocate the buffer
+            deallocate(send_buffer)
+
+        end do
+    end do
+
+end subroutine Zremesh_O4
+
+
+!> Remeshing along Z with the M'6 formula - no tag nor correction for large time steps.
+!!    @param[in]        direction   = current direction
+!!    @param[in]        ind_group   = coordinate of the current group of lines
+!!    @param[in]        gs          = size of groups (along Z direction)
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        p_V         = particles velocity (only to match the interface
+!!                                      of the other remeshing procedures)
+!!    @param[in]        i,j         = indices of the current line (x- and y-coordinates)
+!!    @param[in,out]    scal        = scalar field to advect
+!!    @param[in]        dt          = time step (only to match the interface
+!!                                      of the other remeshing procedures)
+subroutine Zremesh_Mprime6(direction, ind_group, gs, p_pos_adim, p_V, i,j,scal, dt)
+
+    use advec_common_line       ! Some procedures common to advection along all directions
+    use advec_remeshing_line    ! Remeshing formula
+    use advec_variables         ! contains info about solver parameters and others.
+    use cart_topology     ! Description of mesh and of mpi topology
+
+    ! Input/Output
+    integer, intent(in)                         :: direction
+    integer, dimension(2), intent(in)           :: ind_group
+    integer, dimension(2), intent(in)           :: gs
+    integer, intent(in)                         :: i, j
+    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! adimensionned particles position
+    real(WP), dimension(:,:,:), intent(in)      :: p_V          ! particles velocity
+    real(WP), dimension(:,:,:), intent(inout)   :: scal
+    real(WP), intent(in)                        :: dt
+    ! Other local variables
+    ! To compute required communications
+    integer, dimension(gs(1), gs(2))        :: send_group_min     ! minimal index of the mesh points involved in remeshing my particles
+    integer, dimension(gs(1), gs(2))        :: send_group_max     ! maximal index of the mesh points involved in remeshing my particles
+    ! Variables used to remesh particles ...
+        ! ... and to communicate between subdomains. A variable prefixed by "send_" (resp. "rece")
+        ! designates something I send (resp. I receive).
+    real(WP),dimension(:),allocatable   :: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain
+    integer, dimension(2,gs(1),gs(2))   :: rece_proc    ! minimal and maximal gap between my coordinate and the ones of the
+                                                        ! processes from which I will receive data
+    integer, dimension(gs(1),gs(2))     :: proc_min     ! smallest gap between me and the processes to which I send data
+    integer, dimension(gs(1),gs(2))     :: proc_max     ! largest gap between me and the processes to which I send data
+    integer                             :: i1, i2       ! indices of a line inside the group
+    integer                             :: ind_p        ! index of the current particle
+
+    !  -- Compute the remeshing domain --
+    send_group_min = floor(p_pos_adim(1,:,:)-2)
+    send_group_max = floor(p_pos_adim(mesh_sc%N_proc(direction),:,:)+3)
+
+    ! -- Determine the required communications: who will communicate with whom? (i.e. compute senders and receivers) --
+    call AC_obtain_senders_com(direction, gs, ind_group, send_group_min, &
+      & send_group_max, proc_min, proc_max, rece_proc)
+
+    do i2 = 1, gs(2)
+        do i1 = 1, gs(1)
+            send_j_min = send_group_min(i1,i2)
+            send_j_max = send_group_max(i1,i2)
+
+            !  -- Allocate and initialize the buffer --
+            allocate(send_buffer(send_j_min:send_j_max))
+            send_buffer = 0.0_WP
+
+            ! -- Remesh the particles in the buffer --
+            do ind_p = 1, mesh_sc%N_proc(direction), 1
+                call AC_remesh_Mprime6(p_pos_adim(ind_p,i1,i2),scal(i+i1-1,j+i2-1, ind_p), send_buffer)
+            end do
+
+            ! -- Send the buffer to the matching processus and update the scalar field --
+            scal(i+i1-1,j+i2-1,:) = 0
+            call AC_bufferToScalar_line(direction, ind_group, send_j_min, send_j_max, proc_min(i1,i2), proc_max(i1,i2), &
+                & rece_proc(:,i1,i2), send_buffer, scal(i+i1-1,j+i2-1,:))
+
+            ! Deallocate the buffer
+            deallocate(send_buffer)
+
+        end do
+    end do
+
+end subroutine Zremesh_Mprime6
+
+
+! #####################################################################################
+! #####                                                                           #####
+! #####                          Private procedures                               #####
+! #####                                                                           #####
+! #####################################################################################
+
+! =====================================================================================
+! ====================   Remeshing tools to determine communications ==================
+! =====================================================================================
+
+!> Determine the set of processes which will send me information during the remeshing
+!! and compute, for each of these processes, the range of wanted data. Uses implicit
+!! computation rather than communication (only possible if particles are gathered in
+!! blocks with constraints on the velocity variation - as for the corrected lambda
+!! formulas) - works directly on a group of particle lines.
+!!    @param[in]    direction       = current direction (1 = along X, 2 = along Y, 3 = along Z)
+!!    @param[in]    ind_group       = coordinate of the current group of lines
+!!    @param[in]    send_min        = minimal index of the mesh points involved in remeshing the particles of my local subdomain
+!!    @param[in]    send_max        = maximal index of the mesh points involved in remeshing the particles of my local subdomain
+!!    @param[out]   proc_min        = gap between my coordinate and the processes of minimal coordinate which will receive information from me
+!!    @param[out]   proc_max        = gap between my coordinate and the processes of maximal coordinate which will receive information from me
+!!    @param[out]   rece_proc       = coordinate range of the processes which will send me information during the remeshing.
+!!    @param[in]    gp_s            = size of the group of lines along the current direction
+!! @details
+!!    Works on a group of lines of size gs(1) x gs(2).
+!!    Obtains the list of processes associated to the sub-domains where my
+!!    particles will be remeshed, and the list of processes containing particles
+!!    which have to be remeshed in my sub-domain. This way, this procedure determines
+!!    which processes need to communicate with each other in order to perform the
+!!    remeshing (as, in a parallel context, the real space is subdivided and each
+!!    process contains a part of it).
+!!        At the same time, it computes, for each process with which I will
+!!    communicate, the range of mesh points involved for each line of particles
+!!    inside the group, and stores it using sparse-matrix techniques
+!!    (see the cartography defined in the algorithm documentation).
+!!        This routine does not involve any communication to determine whether
+!!    a process is the first or the last one (considering its coordinate along
+!!    the current direction) to send remeshing information to a given process.
+!!    It computes this directly from the constraints on the velocity (as in the
+!!    corrected lambda schemes). When possible, use it rather than AC_obtain_senders_com.
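+!!
+!!    Layout of the "first"/"last" messages built below (a sketch derived from
+!!    the code; the group size gp_s = (/2,2/) and the values are illustrative):
+!! @code
+!!    ! first(:,proc_gap) = (/ write_pointer, -proc_gap,           &
+!!    !                      & nb_lines(ind2=1), nb_lines(ind2=2), &
+!!    !                      & ind1 of each concerned line ... /)
+!!    ! e.g. (/ 7, -1, 2, 1, 1, 2, 1 /) : for proc_gap = 1, lines (1,1), (2,1)
+!!    ! and (1,2) see me as their first sender; slot 1 points to the last
+!!    ! filled entry.
+!! @endcode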
+subroutine AC_obtain_senders_group(direction, gp_s, ind_group, send_min, send_max, proc_min, proc_max, rece_proc)
+! XXX Works only for periodic conditions. For Dirichlet conditions it is
+! possible that either rece_proc(1), or rece_proc(2), or both are never
+! received => detect this (track the first and the last particles) and deal with it.
+
+    use cart_topology   ! info about mesh and mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/output
+    integer, intent(in)                             :: direction
+    integer, dimension(2), intent(in)               :: ind_group
+    integer(kind=4), dimension(:,:), intent(out)    :: proc_min, proc_max
+    integer, dimension(:,:,:), intent(out)          :: rece_proc
+    integer, dimension(2), intent(in)               :: gp_s
+    integer, dimension(:,:), intent(in)             :: send_min     ! minimal index of the mesh points involved in remeshing my particles
+    integer, dimension(:,:), intent(in)             :: send_max     ! maximal index of the mesh points involved in remeshing my particles
+    ! Other local variable
+    integer(kind=4)                                 :: proc_gap         ! gap between a process coordinate (along the current
+                                                                        ! direction) in the MPI topology and my coordinate
+    integer                                         :: rankP, rankN     ! process ranks for shift (P = previous, N = next)
+    integer, dimension(2)                           :: tag_table        ! mpi message tags (to communicate rece_proc(1) and rece_proc(2))
+    integer                                         :: proc_max_abs     ! maximum of the proc_max array
+    integer                                         :: proc_min_abs     ! minimum of the proc_min array
+    integer, dimension(:,:), allocatable            :: first, last      ! storage of the processes to which I will be the first (or the last)
+                                                                        ! to send remeshed particles
+    integer, dimension(2)                           :: first_condition  ! allowed range of values of proc_min and proc_max for being the first
+    integer, dimension(2)                           :: last_condition   ! allowed range of values of proc_min and proc_max for being the last
+    integer, dimension(:,:),allocatable             :: send_request     ! mpi requests of the nonblocking sends
+    integer                                         :: ierr             ! mpi error code
+    integer, dimension(MPI_STATUS_SIZE)             :: statut           ! mpi status
+    integer                                         :: ind1, ind2       ! indices of the current line inside the group
+    integer                                         :: min_size         ! offset in first and last after which the line indices (along the
+                                                                        ! first dimension of the group) are stored
+    integer                                         :: max_size         ! maximum size of first/last along the first direction
+    integer                                         :: indice           ! internal index
+    integer, dimension(1 + gp_s(2)*(2+gp_s(1)))     :: rece_buffer      ! reception buffer for the first/last cartography
+
+    rece_proc = 3*mesh_sc%N(direction)
+    max_size = size(rece_buffer) + 1
+
+    proc_min = floor(real(send_min-1, WP)/mesh_sc%N_proc(direction))
+    proc_max = floor(real(send_max-1, WP)/mesh_sc%N_proc(direction))
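+    ! Example (illustrative numbers): with mesh_sc%N_proc(direction) = 16 mesh
+    ! points per process, send_min = -3 gives proc_min = floor(-4./16.) = -1
+    ! (one process before me) and send_max = 20 gives
+    ! proc_max = floor(19./16.) = 1 (one process after me).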
+    proc_min_abs = minval(proc_min)
+    proc_max_abs = maxval(proc_max)
+
+    allocate(send_request(proc_min_abs:proc_max_abs,3))
+    send_request(:,3) = 0
+
+    ! -- Determine if I am the first or the last to send information to a given
+    ! processus and sort line by target processes for which I am the first and
+    ! for which I am the last. --
+    tag_table = compute_tag(ind_group, tag_obtsend_NP, direction)
+    min_size = 2 + gp_s(2)
+    allocate(first(max_size,proc_min_abs:proc_max_abs))
+    first = 0
+    first(1,:) = min_size
+    allocate(last(max_size,proc_min_abs:proc_max_abs))
+    last = 0
+    last(1,:) = min_size
+    do proc_gap = proc_min_abs, proc_max_abs
+        first(2,proc_gap) = -proc_gap
+        last(2,proc_gap) = -proc_gap
+        first_condition(2) = proc_gap*mesh_sc%N_proc(direction)+1
+        first_condition(1) = 1-2*bl_bound_size + first_condition(2)
+        last_condition(2)  = (proc_gap+1)*mesh_sc%N_proc(direction)
+        last_condition(1)  = -1+2*bl_bound_size + last_condition(2)
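+        ! Example (illustrative numbers): with mesh_sc%N_proc(direction) = 16
+        ! and bl_bound_size = 1, proc_gap = 1 gives first_condition = (/16, 17/):
+        ! I am the first sender for a line if its send_min is below 16 while its
+        ! send_max still reaches 17.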
+        do ind2 = 1, gp_s(2)
+            first(2+ind2,proc_gap) = 0
+            last(2+ind2,proc_gap) =  0
+            do ind1 = 1, gp_s(1)
+                ! Compute if I am the first.
+                if ((send_min(ind1,ind2)< first_condition(1)).AND. &
+                        & (send_max(ind1,ind2)>= first_condition(2))) then
+                    first(2+ind2,proc_gap) =  first(2+ind2,proc_gap)+1
+                    first(1,proc_gap) = first(1,proc_gap) + 1
+                    first(first(1,proc_gap),proc_gap) = ind1
+                end if
+                ! Compute if I am the last.
+                if ((send_max(ind1,ind2) > last_condition(1)) &
+                            & .AND.(send_min(ind1,ind2)<= last_condition(2))) then
+                    last(2+ind2,proc_gap) =  last(2+ind2,proc_gap)+1
+                    last(1,proc_gap) = last(1,proc_gap) + 1
+                    last(last(1,proc_gap),proc_gap) = ind1
+                end if
+            end do
+        end do
+    end do
+
+#ifdef PART_DEBUG
+    do proc_gap = proc_min_abs, proc_max_abs
+        if (first(1,proc_gap)>max_size) then
+            print*, 'too big array on proc = ', cart_rank, ' - proc_gap = ', proc_gap
+            print*, 'it occurs on AC_obtain_senders_group - array concerned : "first"'
+            print*, 'first = ', first(1,proc_gap)
+        end if
+        if (last(1,proc_gap)>max_size) then
+            print*, 'too big array on proc = ', cart_rank, ' - proc_gap = ', proc_gap
+            print*, 'it occurs on AC_obtain_senders_group - array concerned : "last"'
+            print*, 'last = ', last(1,proc_gap)
+        end if
+    end do
+#endif
+
+    ! -- Send information if I am the first or the last --
+    do proc_gap = proc_min_abs, proc_max_abs
+        ! I am the first ?
+        if (first(1,proc_gap)>min_size) then
+            ! Compute the rank of the target process
+            call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rankN, ierr)
+            if(rankN /= D_rank(direction)) then
+                call mpi_Isend(first(2,proc_gap), first(1,proc_gap)-1, MPI_INTEGER, rankN, tag_table(1), D_comm(direction), &
+                        & send_request(proc_gap,1), ierr)
+                send_request(proc_gap,3) = 1
+            else
+                indice = min_size
+                do ind2 = 1, gp_s(2)
+                    do ind1 = 1, first(2+ind2,proc_gap)
+                        indice = indice+1
+                        rece_proc(1,first(indice,proc_gap),ind2) = -proc_gap
+                    end do
+                end do
+            end if
+        end if
+        ! I am the last ?
+        if (last(1,proc_gap)>min_size) then
+            ! Compute the rank of the target process
+            call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rankN, ierr)
+            if(rankN /= D_rank(direction)) then
+                call mpi_Isend(last(2,proc_gap), last(1,proc_gap)-1, MPI_INTEGER, rankN, tag_table(2), D_comm(direction), &
+                        & send_request(proc_gap,2), ierr)
+                send_request(proc_gap,3) = send_request(proc_gap, 3) + 2
+            else
+                indice = min_size
+                do ind2 = 1, gp_s(2)
+                    do ind1 = 1, last(2+ind2,proc_gap)
+                        indice = indice+1
+                        rece_proc(2,last(indice,proc_gap),ind2) = -proc_gap
+                    end do
+                end do
+            end if
+        end if
+    end do
+
+    ! -- Receive it --
+    ! max_size now holds the real buffer size: size(rece_buffer) = 1 + gp_s(2)*(2+gp_s(1))
+    max_size = max_size-1
+    do while(any(rece_proc(1,:,:) == 3*mesh_sc%N(direction)))
+        call mpi_recv(rece_buffer(1), max_size, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
+        indice = min_size-1
+        do ind2 = 1, gp_s(2)
+            do ind1 = 1, rece_buffer(1+ind2)
+                indice = indice+1
+                rece_proc(1,rece_buffer(indice),ind2) = rece_buffer(1)
+            end do
+        end do
+    end do
+    do while(any(rece_proc(2,:,:) == 3*mesh_sc%N(direction)))
+        call mpi_recv(rece_buffer(1), max_size, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
+        indice = min_size-1
+        do ind2 = 1, gp_s(2)
+            do ind1 = 1, rece_buffer(1+ind2)
+                indice = indice+1
+                rece_proc(2,rece_buffer(indice),ind2) = rece_buffer(1)
+            end do
+        end do
+    end do
+
+    ! -- Free Isend buffer --
+    do proc_gap = proc_min_abs, proc_max_abs
+        select case (send_request(proc_gap,3))
+            case (3)
+                call mpi_wait(send_request(proc_gap,1), statut, ierr)
+                call mpi_wait(send_request(proc_gap,2), statut, ierr)
+            case (2)
+                call mpi_wait(send_request(proc_gap,2), statut, ierr)
+            case (1)
+                call mpi_wait(send_request(proc_gap,1), statut, ierr)
+        end select
+    end do
+
+    deallocate(first)
+    deallocate(last)
+    deallocate(send_request)
+
+end subroutine AC_obtain_senders_group
+
+
+!> Determine the set of processes which will send me information during the
+!!  scalar remeshing in an explicit (and expensive) way: communications!
+!!    @param[in]    direction   = current direction (1 = along X, 2 = along Y, 3 = along Z)
+!!    @param[in]    ind_group   = coordinate of the current group of lines
+!!    @param[in]    send_min    = minimal mesh index involved in remeshing the particles of my local subdomain
+!!    @param[in]    send_max    = maximal mesh index involved in remeshing the particles of my local subdomain
+!!    @param[out]   proc_min    = gap between my coordinate and the process of minimal coordinate which will receive information from me
+!!    @param[out]   proc_max    = gap between my coordinate and the process of maximal coordinate which will receive information from me
+!!    @param[out]   rece_proc   = coordinate range of the processes which will send me information during the remeshing.
+!!    @param[in]    gp_s        = size of the group of lines along the current direction
+!! @details
+!!    Obtain the list of processes containing particles which belong to my
+!!    subdomain after their advection (and which will therefore be remeshed
+!!    into my subdomain). The result is returned as an interval
+!!    [send_min; send_max]: every process whose coordinate (along the current
+!!    direction) belongs to this segment is involved in the scalar remeshing
+!!    inside the current subdomain. Use this method when the senders are not
+!!    predictable without communication, as for the M'6 scheme for instance.
+!!    More precisely, it corresponds to schemes without particle blocks, i.e.
+!!    without the velocity-variation constraints that prevent the distance
+!!    between two particles from growing (or shrinking) too much.
+subroutine AC_obtain_senders_com(direction, gp_s, ind_group, send_min, send_max, proc_min, proc_max, rece_proc)
+! XXX Works only for periodic boundary conditions. See AC_obtain_senders.
+! Adapting it to other conditions should be straightforward.
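+! Worked example (added for illustration): with mesh_sc%N_proc(direction) = 16
+! mesh points per process, a line whose remeshed particles span the global mesh
+! indices send_min = 14 to send_max = 19 gives proc_min = floor(13/16) = 0 and
+! proc_max = floor(18/16) = 1, i.e. my remeshing writes into my own subdomain
+! and into the next process along the current direction.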
+
+    use cart_topology   ! info about mesh and mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/output
+    integer, intent(in)                             :: direction
+    integer, dimension(2), intent(in)               :: ind_group
+    integer(kind=4), dimension(:,:), intent(out)    :: proc_min, proc_max
+    integer, dimension(:,:,:), intent(out)          :: rece_proc
+    integer, dimension(2), intent(in)               :: gp_s
+    integer, dimension(:,:), intent(in)             :: send_min     ! minimal mesh index involved in remeshing my particles
+    integer, dimension(:,:), intent(in)             :: send_max     ! maximal mesh index involved in remeshing my particles
+    ! Other local variable
+    integer(kind=4)                                 :: proc_gap         ! gap between a process coordinate (along the current
+                                                                        ! direction) in the MPI topology and my coordinate
+    integer                                         :: rankP, rankN     ! process ranks for shift (P = previous, N = next)
+    integer, dimension(2)                           :: tag_table        ! mpi message tags (to communicate rece_proc(1) and rece_proc(2))
+    integer, dimension(gp_s(1), gp_s(2))            :: proc_max_prev    ! maximum gap between the previous process and the receivers of its remeshing buffer
+    integer, dimension(gp_s(1), gp_s(2))            :: proc_min_next    ! minimum gap between the next process and the receivers of its remeshing buffer
+    integer                                         :: proc_max_abs     ! maximum of the proc_max array
+    integer                                         :: proc_min_abs     ! minimum of the proc_min array
+    integer, dimension(:,:), allocatable            :: first, last      ! storage of the processes to which I will be the first (or the last)
+                                                                        ! to send remeshed particles
+    integer, dimension(:,:),allocatable             :: send_request     ! mpi requests of the non-blocking sends
+    integer                                         :: ierr             ! mpi error code
+    integer, dimension(MPI_STATUS_SIZE)             :: statut           ! mpi status
+    integer                                         :: ind1, ind2       ! indices of the current line inside the group
+    integer                                         :: min_size         ! starting index in first and last to store indices along the first dimension of the group lines
+    integer                                         :: max_size         ! maximum size of first/last along the first direction
+    integer                                         :: indice           ! internal index
+    integer, dimension(1 + gp_s(2)*(2+gp_s(1)))     :: rece_buffer      ! buffer for the reception of rece_max
+
+
+    rece_proc = 3*mesh_sc%N(direction)
+    max_size = size(rece_buffer) + 1
+
+    proc_min = floor(real(send_min-1, WP)/mesh_sc%N_proc(direction))
+    proc_max = floor(real(send_max-1, WP)/mesh_sc%N_proc(direction))
+    proc_min_abs = minval(proc_min)
+    proc_max_abs = maxval(proc_max)
+
+    allocate(send_request(proc_min_abs:proc_max_abs,3))
+    send_request(:,3) = 0
+
+    ! -- Exchange proc_min and proc_max to determine whether I am the first
+    ! or the last to send information to a given target process. --
+    min_size = gp_s(1)*gp_s(2)
+    ! Compute the message tag - we re-use the tag_part_tag_NP id, since using
+    ! this procedure supposes that "AC_type_and_block" is not used.
+    tag_table = compute_tag(ind_group, tag_part_tag_NP, direction)
+    ! Exchange "ghost"
+    call mpi_Sendrecv(proc_min(1,1), min_size, MPI_INTEGER, neighbors(direction,1), tag_table(1), &
+            & proc_min_next(1,1), min_size, MPI_INTEGER, neighbors(direction,2), tag_table(1),    &
+            & D_comm(direction), statut, ierr)
+    call mpi_Sendrecv(proc_max(1,1), min_size, MPI_INTEGER, neighbors(direction,2), tag_table(2), &
+            & proc_max_prev(1,1), min_size, MPI_INTEGER, neighbors(direction,1), tag_table(2),    &
+            & D_comm(direction), statut, ierr)
+
+    ! -- Determine whether I am the first or the last to send information to a
+    ! given process, and sort the lines by target process, separating those for
+    ! which I am the first from those for which I am the last. --
+    tag_table = compute_tag(ind_group, tag_obtsend_NP, direction)
+    min_size = 2 + gp_s(2)
+    allocate(first(max_size,proc_min_abs:proc_max_abs))
+    first = 0
+    first(1,:) = min_size
+    allocate(last(max_size,proc_min_abs:proc_max_abs))
+    last = 0
+    last(1,:) = min_size
+    do proc_gap = proc_min_abs, proc_max_abs
+        first(2,proc_gap) = -proc_gap
+        last(2,proc_gap) = -proc_gap
+    end do
+    do ind2 = 1, gp_s(2)
+        first(2+ind2,:) = 0
+        last(2+ind2,:) =  0
+        do ind1 = 1, gp_s(1)
+            ! Determine if I am the first, i.e. if:
+            ! a - proc_min <= proc_gap <= proc_max,
+            ! b - proc_gap > proc_max_prev - 1.
+            do proc_gap = max(proc_min(ind1,ind2), proc_max_prev(ind1,ind2)), proc_max(ind1,ind2)
+                first(2+ind2,proc_gap) =  first(2+ind2,proc_gap)+1
+                first(1,proc_gap) = first(1,proc_gap) + 1
+                first(first(1,proc_gap),proc_gap) = ind1
+            end do
+            ! Determine if I am the last, i.e. if:
+            ! a - proc_min <= proc_gap <= proc_max,
+            ! b - proc_gap < proc_min_next + 1.
+            do proc_gap = proc_min(ind1,ind2), min(proc_min_next(ind1,ind2), proc_max(ind1,ind2))
+                last(2+ind2,proc_gap) =  last(2+ind2,proc_gap)+1
+                last(1,proc_gap) = last(1,proc_gap) + 1
+                last(last(1,proc_gap),proc_gap) = ind1
+            end do
+        end do
+    end do
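+
+    ! Worked example (added for illustration): if, for a given line,
+    ! proc_min = 0, proc_max = 2, proc_max_prev = 1 and proc_min_next = 1,
+    ! then I am the "first" sender for proc_gap = 1..2 and the "last"
+    ! sender for proc_gap = 0..1.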
+
+#ifdef PART_DEBUG
+    do proc_gap = proc_min_abs, proc_max_abs
+        if (first(1,proc_gap)>max_size) then
+            print*, 'array overflow on proc = ', cart_rank, ' - proc_gap = ', proc_gap
+            print*, 'it occurs in AC_obtain_senders_com - array concerned : "first"'
+            print*, 'first = ', first(1,proc_gap)
+        end if
+        if (last(1,proc_gap)>max_size) then
+            print*, 'array overflow on proc = ', cart_rank, ' - proc_gap = ', proc_gap
+            print*, 'it occurs in AC_obtain_senders_com - array concerned : "last"'
+            print*, 'last = ', last(1,proc_gap)
+        end if
+    end do
+#endif
+
+    ! -- Send information if I am the first or the last --
+    do proc_gap = proc_min_abs, proc_max_abs
+        ! Am I the first?
+        if (first(1,proc_gap)>min_size) then
+            ! Compute the rank of the target process
+            call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rankN, ierr)
+            if(rankN /= D_rank(direction)) then
+                call mpi_Isend(first(2,proc_gap), first(1,proc_gap)-1, MPI_INTEGER, rankN, tag_table(1), D_comm(direction), &
+                        & send_request(proc_gap,1), ierr)
+                send_request(proc_gap,3) = 1
+            else
+                indice = min_size
+                do ind2 = 1, gp_s(2)
+                    do ind1 = 1, first(2+ind2,proc_gap)
+                        indice = indice+1
+                        rece_proc(1,first(indice,proc_gap),ind2) = -proc_gap
+                    end do
+                end do
+            end if
+        end if
+        ! Am I the last?
+        if (last(1,proc_gap)>min_size) then
+            ! Compute the rank of the target process
+            call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rankN, ierr)
+            if(rankN /= D_rank(direction)) then
+                call mpi_Isend(last(2,proc_gap), last(1,proc_gap)-1, MPI_INTEGER, rankN, tag_table(2), D_comm(direction), &
+                        & send_request(proc_gap,2), ierr)
+                send_request(proc_gap,3) = send_request(proc_gap, 3) + 2
+            else
+                indice = min_size
+                do ind2 = 1, gp_s(2)
+                    do ind1 = 1, last(2+ind2,proc_gap)
+                        indice = indice+1
+                        rece_proc(2,last(indice,proc_gap),ind2) = -proc_gap
+                    end do
+                end do
+            end if
+        end if
+    end do
+
+
+    ! -- Receive it --
+    ! size_max = size(rece_buffer) ! 2 + 2*gp_s(1)*gp_s(2)
+    max_size = max_size-1
+    do while(any(rece_proc(1,:,:) == 3*mesh_sc%N(direction)))
+        call mpi_recv(rece_buffer(1), max_size, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
+        indice = min_size-1
+        do ind2 = 1, gp_s(2)
+            do ind1 = 1, rece_buffer(1+ind2)
+                indice = indice+1
+                rece_proc(1,rece_buffer(indice),ind2) = rece_buffer(1)
+            end do
+        end do
+    end do
+    do while(any(rece_proc(2,:,:) == 3*mesh_sc%N(direction)))
+        call mpi_recv(rece_buffer(1), max_size, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
+        indice = min_size-1
+        do ind2 = 1, gp_s(2)
+            do ind1 = 1, rece_buffer(1+ind2)
+                indice = indice+1
+                rece_proc(2,rece_buffer(indice),ind2) = rece_buffer(1)
+            end do
+        end do
+    end do
+
+    ! -- Free Isend buffer --
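+    ! (added note) send_request(:,3) encodes which non-blocking sends are still
+    ! pending for each target process: 1 = only the "first" message, 2 = only
+    ! the "last" message, 3 = both; hence the select case below.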
+    do proc_gap = proc_min_abs, proc_max_abs
+        select case (send_request(proc_gap,3))
+            case (3)
+                call mpi_wait(send_request(proc_gap,1), statut, ierr)
+                call mpi_wait(send_request(proc_gap,2), statut, ierr)
+            case (2)
+                call mpi_wait(send_request(proc_gap,2), statut, ierr)
+            case (1)
+                call mpi_wait(send_request(proc_gap,1), statut, ierr)
+        end select
+    end do
+
+    deallocate(first)
+    deallocate(last)
+    deallocate(send_request)
+
+end subroutine AC_obtain_senders_com
+
+
+end module advec_remesh_line
+!> @}
diff --git a/HySoP/src/scalesReduced/particles/advec_remesh_Mprime.f90 b/HySoP/src/scalesReduced/particles/advec_remesh_Mprime.f90
new file mode 100644
index 0000000000000000000000000000000000000000..9725e81b227e4ab2b2c00f9cbc57660a53bed8fd
--- /dev/null
+++ b/HySoP/src/scalesReduced/particles/advec_remesh_Mprime.f90
@@ -0,0 +1,1191 @@
+!USEFORTEST advec
+!> @addtogroup part
+
+!------------------------------------------------------------------------------
+!
+! MODULE: advec_remeshing_Mprime
+!
+!
+! DESCRIPTION:
+!> This module gathers all the remeshing formulas of the ``Mprime'' family.
+!! These interpolation polynomials redistribute the particles onto the mesh
+!! grid at each iteration.
+!! @details
+!! It provides the M'4, M'6 and M'8 remeshing formulas, as well as the
+!! uncorrected Lambda(4,4), Lambda(6,4), Lambda(6,6) and Lambda(8,4) formulas.
+!!   These M' formulas only involve a stability condition depending on the
+!! velocity gradient rather than on the CFL number, which allows large
+!! time steps. The stability constant is equal to 1 (i.e. the condition is
+!! dt < 1/|gradient(velocity)|), where the numerical gradient is computed
+!! with a finite-difference scheme.
+!!   In practice, the M'6 method appears to offer the best ratio between
+!! precision and numerical cost. It is locally of order 4 and generically of
+!! order 2 (the spatial order decreases in regions of strong velocity
+!! variation).
+!!   The local accuracy of the M'8 scheme can be better.
+!!     This module also provides wrappers to remesh a complete line of
+!! particles (with the different formulas), either into an array of reals or
+!! through an array of pointers to reals. In order to gather communications
+!! between different lines of particles, it is better to use contiguous
+!! memory for the mesh points belonging to the same process, and an array of
+!! pointers makes this easy to handle.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
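+!
+! Worked example (added for illustration, not part of the original text): with
+! the stability condition dt * max|d(velocity)/dx| < 1, a flow whose largest
+! velocity gradient is 50 s^-1 allows time steps up to dt = 0.02 s,
+! independently of the CFL number on the scalar grid.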
+
+module advec_remeshing_Mprime
+
+    use structure_tools
+    use advec_common_line
+
+    implicit none
+
+    ! #############################
+    ! ########## Header ##########
+    ! #############################
+
+!   ! ===== Abstract profile of M' remeshing subroutines =====
+!   ! --- Abstract profile of subroutine used to remesh a line of particles ---
+!   ! Variant: the buffer is an array of pointer (and not a pointer to an array)
+!   abstract interface
+!       subroutine AC_remesh_Mprime(p_pos_adim, scal1D, bl_type, bl_tag, ind_min, buffer)
+!           use precision_tools
+!           use advec_variables
+
+!           implicit none
+
+!           ! Input/Output
+!           real(WP), intent(in)                        :: p_pos_adim
+!           real(WP), intent(in)                        :: scal1D
+!           type(real_pter), dimension(:), intent(inout):: buffer
+!       end subroutine AC_remesh_Mprime
+!   end interface
+
+    ! ===== Public variables =====
+    !> To know which diffusion coefficient to use.
+    integer, public                                     :: sc_remesh_ind
+    integer, public                                     :: current_dir = 1
+
+    ! ===== Public procedures =====
+    ! Wrappers to the M' remeshing formulas (actually pointers to the right subroutine)
+    procedure(AC_remesh_Mstar6_array), pointer, public ::  AC_remesh_Mprime_array  => null()   !> wrapper to the M' remeshing formula - buffer stored in a classical array
+    procedure(AC_remesh_Mstar6_pter),  pointer, public ::  AC_remesh_Mprime_pter   => null()   !> wrapper to the M' remeshing formula - buffer accessed through an array of pointers
+    ! To get the right "line remeshing" wrapper
+    public                              :: AC_remesh_init_Mprime
+    !----- M'4 remeshing formula -----
+    public                              :: AC_remesh_Mprime4           ! uses 4 grid points, 2 on each side of the particle.
+    public                              :: AC_remesh_Mprime4_array
+    public                              :: AC_remesh_Mprime4_pter
+    !----- M'6 remeshing formula -----
+    public                              :: AC_remesh_Mstar6            ! uses 6 grid points, 3 on each side of the particle.
+    public                              :: AC_remesh_Mstar6_array
+    public                              :: AC_remesh_Mstar6_pter
+    !----- M'8 remeshing formula -----
+    public                              :: AC_remesh_Mprime8           ! uses 8 grid points, 4 on each side of the particle.
+    public                              :: AC_remesh_Mprime8_array
+    public                              :: AC_remesh_Mprime8_pter
+
+
+    !===== Interface =====
+    ! -- M'4: array of reals or of pointers --
+    interface AC_remesh_Mprime4
+        module procedure AC_remesh_Mprime4_pter, AC_remesh_Mprime4_array
+    end interface AC_remesh_Mprime4
+    ! -- M'6: array of real or of pointer --
+    interface AC_remesh_Mstar6
+        module procedure AC_remesh_Mstar6_pter, AC_remesh_Mstar6_array
+    end interface AC_remesh_Mstar6
+
+    ! -- M'8: array of reals or of pointers --
+    interface AC_remesh_Mprime8
+        module procedure AC_remesh_Mprime8_pter, AC_remesh_Mprime8_array
+    end interface AC_remesh_Mprime8
+
+contains
+
+! ===================================================================
+! ============     Pointer to the right remesh formula    ===========
+! ===================================================================
+
+subroutine AC_remesh_init_Mprime()
+
+    use advec_variables         ! solver context
+
+    select case(trim(type_solv))
+    case ('d_M4')
+        AC_remesh_Mprime_array => AC_remesh_Mprime4_diff_array
+        AC_remesh_Mprime_pter  => AC_remesh_Mprime4_diff_pter
+    case ('p_M4')
+        AC_remesh_Mprime_array => AC_remesh_Mprime4_array
+        AC_remesh_Mprime_pter  => AC_remesh_Mprime4_pter
+    case ('p_M8')
+        AC_remesh_Mprime_array => AC_remesh_Mprime8_array
+        AC_remesh_Mprime_pter  => AC_remesh_Mprime8_pter
+    case ('p_44')
+        AC_remesh_Mprime_array => AC_remesh_L4_4_array
+        AC_remesh_Mprime_pter  => AC_remesh_L4_4_pter
+    case ('p_64')
+        AC_remesh_Mprime_array => AC_remesh_L6_4_array
+        AC_remesh_Mprime_pter  => AC_remesh_L6_4_pter
+    case ('p_66')
+        AC_remesh_Mprime_array => AC_remesh_L6_6_array
+        AC_remesh_Mprime_pter  => AC_remesh_L6_6_pter
+    case ('p_84')
+        AC_remesh_Mprime_array => AC_remesh_L8_4_array
+        AC_remesh_Mprime_pter  => AC_remesh_L8_4_pter
+    ! To ensure backward compatibility
+    case ('p_L4')
+        AC_remesh_Mprime_array => AC_remesh_L4_4_array
+        AC_remesh_Mprime_pter  => AC_remesh_L4_4_pter
+    case ('p_L6')
+        AC_remesh_Mprime_array => AC_remesh_L6_6_array
+        AC_remesh_Mprime_pter  => AC_remesh_L6_6_pter
+    case default
+        AC_remesh_Mprime_array => AC_remesh_Mstar6_array
+        AC_remesh_Mprime_pter  => AC_remesh_Mstar6_pter
+    end select
+
+end subroutine AC_remesh_init_Mprime
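+
+! Minimal usage sketch (illustrative only; `direction`, `n_part`, `p_pos`,
+! `scal` and `buffer` are hypothetical names for the current direction, the
+! particle count, the particle positions and scalars, and a 1D remeshing
+! buffer):
+!     call AC_remesh_init_Mprime()
+!     do ind = 1, n_part
+!         call AC_remesh_Mprime_array(direction, p_pos(ind), scal(ind), buffer)
+!     end do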
+
+! =========================================================================
+! ============     Interpolation polynom used for remeshing    ============
+! =========================================================================
+
+!> M'4 remeshing formula - version for an array of reals
+!! @author Chloe Mimeau, LJK
+!!      @param[in]       dir     = current direction
+!!      @param[in]       pos_adim= dimensionless particle position
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporary buffer for the remeshed scalar field
+subroutine AC_remesh_Mprime4_array(dir, pos_adim, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    !Input/Output
+    integer, intent(in)                     :: dir
+    real(WP), intent(in)                    :: pos_adim, sca
+    real(WP), dimension(:), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: j0, j1                   ! indices of the nearest mesh points
+    real(WP)    :: bM, b0, bP, bP2          ! interpolation weights for the particle
+    real(WP)    :: y0                       ! dimensionless distance to the mesh points
+
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! Interpolation weights
+    !bM  = ((2.-(y0+1.))**2 * (1.-(y0+1.)))/2.
+    bM = (y0 * (y0 * (-y0 + 2.) - 1.)) / 2.
+    !bP = 1.-2.5*(1.-y0)**2 + 1.5*(1.-y0)**3
+    bP = (y0 * (y0 * (-3. * y0 + 4.) + 1.)) / 2.
+    !bP2 = ((2.-(2.-y0))**2 * (1.-(2.-y0)))/2.
+    bP2 = (y0 * y0 * (y0 - 1.)) / 2.
+    !b0 = 1.- 2.5*y0**2 + 1.5*y0**3
+    b0 = 1. - (bM+bP+bP2)
+
+    ! remeshing
+    j1 = modulo(j0-2,mesh_sc%N(dir))+1  ! j0-1
+    buffer(j1) = buffer(j1) + sca*bM
+    j1 = modulo(j0-1,mesh_sc%N(dir))+1  ! j0
+    buffer(j1) = buffer(j1) + sca*b0
+    j1 = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
+    buffer(j1) = buffer(j1) + sca*bP
+    j1 = modulo(j0+1,mesh_sc%N(dir))+1  ! j0+2
+    buffer(j1) = buffer(j1) + sca*bP2
+
+end subroutine AC_remesh_Mprime4_array
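+
+! Worked example of the periodic wrap above (added note): indices are 1-based,
+! so j1 = modulo(j0-2, N) + 1 maps the point j0-1 back into [1, N]; for N = 8
+! and j0 = 1 (a particle near the left boundary), modulo(-1, 8) = 7 and the bM
+! weight is deposited on mesh point 8.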
+
+
+!> M'4 remeshing formula - version for an array of pointers
+!! @author Chloe Mimeau, LJK
+!!      @param[in]       pos_adim= dimensionless particle position
+!!      @param[in]       translat= translation to convert the dimensionless particle position into the proper array index
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporary buffer for the remeshed scalar field
+subroutine AC_remesh_Mprime4_pter(pos_adim, translat, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    !Input/Output
+    real(WP), intent(in)                                        :: pos_adim, sca
+    type(real_pter), dimension(:), intent(inout)                :: buffer
+    integer, intent(in)                                         :: translat
+    ! Other local variables
+    integer     :: j0                       ! index of the nearest mesh point
+    real(WP)    :: bM, b0, bP, bP2          ! interpolation weights for the particle
+    real(WP)    :: y0                       ! dimensionless distance to the mesh points
+
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! translation to obtain the array index
+    j0 = j0 + translat
+
+    ! Interpolation weights
+    !bM  = ((2.-(y0+1.))**2 * (1.-(y0+1.)))/2.
+    bM = (y0 * (y0 * (-y0 + 2.) - 1.)) / 2.
+    !bP = 1.-2.5*(1.-y0)**2 + 1.5*(1.-y0)**3
+    bP = (y0 * (y0 * (-3. * y0 + 4.) + 1.)) / 2.
+    !bP2 = ((2.-(2.-y0))**2 * (1.-(2.-y0)))/2.
+    bP2 = (y0 * y0 * (y0 - 1.)) / 2.
+    !b0 = 1.- 2.5*y0**2 + 1.5*y0**3
+    b0 = 1. - (bM+bP+bP2)
+
+    ! remeshing
+    buffer(j0-1)%pter = buffer(j0-1)%pter + sca*bM
+    buffer(j0  )%pter = buffer(j0  )%pter + sca*b0
+    buffer(j0+1)%pter = buffer(j0+1)%pter + sca*bP
+    buffer(j0+2)%pter = buffer(j0+2)%pter + sca*bP2
+
+end subroutine AC_remesh_Mprime4_pter
+
+
+!> M'4 remeshing formula with diffusion - version for an array of reals
+!! @author Jean-Baptiste Lagaert, LEGI
+!!      @param[in]       dir     = current direction
+!!      @param[in]       pos_adim= dimensionless particle position
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporary buffer for the remeshed scalar field
+!! @details Diffusion is taken into account through the module variable
+!! sc_diff_dt_dx = (diffusivity*time step)/((space step)^2).
+subroutine AC_remesh_Mprime4_diff_array(dir, pos_adim, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    !Input/Output
+    integer, intent(in)                     :: dir
+    real(WP), intent(in)                    :: pos_adim, sca
+    real(WP), dimension(:), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: j0, j1                   ! indices of the nearest mesh points
+    real(WP)    :: bM, b0, bP, bP2          ! interpolation weights for the particle
+    real(WP)    :: y0                       ! dimensionless distance to the mesh points
+    real(WP)    :: diff1, diff2             ! remeshing corrections accounting for the diffusion term
+
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! Compute coefficient for diffusion part
+    diff1 = 1.5*(1.-0.5*4.*sc_diff_dt_dx(sc_remesh_ind,dir))
+    diff2 = 0.5*(1.-1.5*4.*sc_diff_dt_dx(sc_remesh_ind,dir))
+
+    ! Interpolation weights
+    !bM = .5*((2.-(y0+1))**2)*(diff1*(2.-(y0+1))/3.-diff2*(y0+1))
+    bM = (-1./6)*((y0-1.)**2)*(diff1*(y0-1.)+diff2*(3.*y0+3.))
+    !b0 =.5*((2.-y0)**2)*(diff1*(2-y0)/3.-diff2*y0)-((1.-y0)**2)*(2.*diff1*(1.-y0)/3.-2.*diff2*y0)
+    b0 =(y0**2)*((diff1*(0.5*y0-1.))+(diff2*(1.5*y0-2.))) + (diff1*2./3._WP)
+    !bP =.5*((2.-(1-y0))**2)*(diff1*(2-(1-y0))/3.-diff2*(1-y0))-((1.-(1-y0))**2)*(2.*diff1*(1.-(1-y0))/3.-2.*diff2*(1-y0))
+    bP = diff1*(y0*(y0*(0.5-0.5*y0)+0.5)+(1._WP/6._WP))+diff2*(y0*(y0*(2.5-1.5*y0)-0.5)-0.5)
+    !bP2= .5*((2.-(2-y0))**2)*(diff1*(2.-(2-y0))/3.-diff2*(2-y0))
+    bP2 = 0.5_WP*(y0**2)*((1._WP/3._WP)*diff1*y0 - diff2*(2.-y0))
+    !bP = 1._WP - (bM + b0 + bP2)
+
+
+    ! remeshing
+    j1 = modulo(j0-2,mesh_sc%N(dir))+1  ! j0-1
+    buffer(j1) = buffer(j1) + sca*bM
+    j1 = modulo(j0-1,mesh_sc%N(dir))+1  ! j0
+    buffer(j1) = buffer(j1) + sca*b0
+    j1 = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
+    buffer(j1) = buffer(j1) + sca*bP
+    j1 = modulo(j0+1,mesh_sc%N(dir))+1  ! j0+2
+    buffer(j1) = buffer(j1) + sca*bP2
+
+end subroutine AC_remesh_Mprime4_diff_array
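+
+! Sanity check (derived here, not stated in the original source): for a
+! vanishing diffusion coefficient, sc_diff_dt_dx = 0 gives diff1 = 1.5 and
+! diff2 = 0.5, and the four weights above reduce exactly to the standard M'4
+! weights of AC_remesh_Mprime4_array (e.g. bM = -(y0*(y0-1.)**2)/2.).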
+
+
+!> M'4 remeshing formula with diffusion - version for an array of pointers.
+!! @author Jean-Baptiste Lagaert, LEGI
+!!      @param[in]       pos_adim= dimensionless particle position
+!!      @param[in]       translat= translation to convert the dimensionless particle position into the proper array index
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporary buffer for the remeshed scalar field
+!! @details Diffusion is taken into account through the module variables
+!! sc_diff_dt_dx and current_dir.
+subroutine AC_remesh_Mprime4_diff_pter(pos_adim, translat, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    !Input/Output
+    real(WP), intent(in)                                        :: pos_adim, sca
+    type(real_pter), dimension(:), intent(inout)                :: buffer
+    integer, intent(in)                                         :: translat
+    ! Other local variables
+    integer     :: j0                       ! index of the nearest mesh point
+    real(WP)    :: bM, b0, bP, bP2          ! interpolation weights for the particle
+    real(WP)    :: y0                       ! dimensionless distance to the mesh points
+    real(WP)    :: diff1, diff2             ! remeshing corrections accounting for the diffusion term
+
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! translation to obtain the array index
+    j0 = j0 + translat
+
+    ! Compute coefficient for diffusion part
+    diff1 = 1.5*(1.-0.5*4.*sc_diff_dt_dx(sc_remesh_ind,current_dir))
+    diff2 = 0.5*(1.-1.5*4.*sc_diff_dt_dx(sc_remesh_ind,current_dir))
+
+    ! Interpolation weights
+    !bM = .5*((2.-(y0+1))**2)*(diff1*(2.-(y0+1))/3.-diff2*(y0+1))
+    bM = (-1./6)*((y0-1.)**2)*(diff1*(y0-1.)+diff2*(3.*y0+3.))
+    !b0 =.5*((2.-y0)**2)*(diff1*(2-y0)/3.-diff2*y0)-((1.-y0)**2)*(2.*diff1*(1.-y0)/3.-2.*diff2*y0)
+    b0 =(y0**2)*((diff1*(0.5*y0-1.))+(diff2*(1.5*y0-2.))) + (diff1*2./3._WP)
+    !bP =.5*((2.-(1-y0))**2)*(diff1*(2-(1-y0))/3.-diff2*(1-y0))-((1.-(1-y0))**2)*(2.*diff1*(1.-(1-y0))/3.-2.*diff2*(1-y0))
+    bP = diff1*(y0*(y0*(0.5-0.5*y0)+0.5)+(1._WP/6._WP))+diff2*(y0*(y0*(2.5-1.5*y0)-0.5)-0.5)
+    !bP2= .5*((2.-(2-y0))**2)*(diff1*(2.-(2-y0))/3.-diff2*(2-y0))
+    bP2 = 0.5_WP*(y0**2)*((1._WP/3._WP)*diff1*y0 - diff2*(2.-y0))
+    !bP = 1._WP - (bM + b0 + bP2)
+
+    ! remeshing
+    buffer(j0-1)%pter = buffer(j0-1)%pter + sca*bM
+    buffer(j0  )%pter = buffer(j0  )%pter + sca*b0
+    buffer(j0+1)%pter = buffer(j0+1)%pter + sca*bP
+    buffer(j0+2)%pter = buffer(j0+2)%pter + sca*bP2
+
+end subroutine AC_remesh_Mprime4_diff_pter
+
+
+!> M'6 remeshing formula - version for an array of reals
+!! @author Jean-Baptiste Lagaert, LEGI/LJK
+!!      @param[in]       dir     = current direction
+!!      @param[in]       pos_adim= dimensionless particle position
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporary buffer for the remeshed scalar field
+subroutine AC_remesh_Mstar6_array(dir, pos_adim, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    !Input/Output
+    integer, intent(in)                     :: dir
+    real(WP), intent(in)                    :: pos_adim, sca
+    real(WP), dimension(:), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: j0, j1                   ! indices of the nearest mesh points
+    real(WP)    :: bM, bM2, b0, bP, bP2, bP3! interpolation weights for the particle
+    real(WP)    :: y0                       ! dimensionless distance to the mesh points
+
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! Interpolation weights
+    !bM2 =-(((y0+2.)-2)*(5.*(y0+2.)-8.)*((y0+2.)-3.)**3)/24.
+    bM2 = y0*(2. + y0*(-1. + y0*(-9. + (13. - 5.*y0)*y0)))/24.
+    !bM  =(y0+1.-1.)*(y0+1.-2.)*(25.*(y0+1.)**3-114.*(y0+1.)**2+153.*(y0+1.)-48.)/24.
+    bM = y0*(-16. + y0*(16. + y0*(39. + y0*(-64. + 25.*y0))))/24.
+    !bP  =-((1.-y0)-1.)*(25.*(1.-y0)**4-38.*(1.-y0)**3-3.*(1.-y0)**2+12.*(1.-y0)+12)/12.
+    bP = ( y0*(8. + y0*(8. + y0*(33. + y0*(-62. + 25.*y0)))))/12.
+    !bP2 = ((2.-y0)-1.)*((2.-y0)-2.)*(25.*(2.-y0)**3-114.*(2.-y0)**2+153.*(2.-y0)-48.)/24.
+    bP2 = (y0*(-2. + y0*(-1. + y0*(-33. + (61. - 25.*y0)*y0))))/24.
+    !bP3 =-(((3.-y0)-2)*(5.*(3.-y0)-8.)*((3.-y0)-3.)**3)/24.
+    bP3 = (y0**3)*(7. + y0*(5.*y0 - 12.))/24.
+    !b0  =-(y0-1.)*(25.*y0**4-38.*y0**3-3.*y0**2+12.*y0+12)/12.
+    !b0 = (12. + y0**2*(-15. + y0*(-35. + (63. - 25.*y0)*y0)))/12.
+    b0 = 1. - (bM2+bM+bP+bP2+bP3)
+
+    ! remeshing
+    j1 = modulo(j0-3,mesh_sc%N(dir))+1  ! j0-2
+    buffer(j1) = buffer(j1) + sca*bM2
+    j1 = modulo(j0-2,mesh_sc%N(dir))+1  ! j0-1
+    buffer(j1) = buffer(j1) + sca*bM
+    j1 = modulo(j0-1,mesh_sc%N(dir))+1  ! j0
+    buffer(j1) = buffer(j1) + sca*b0
+    j1 = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
+    buffer(j1) = buffer(j1) + sca*bP
+    j1 = modulo(j0+1,mesh_sc%N(dir))+1  ! j0+2
+    buffer(j1) = buffer(j1) + sca*bP2
+    j1 = modulo(j0+2,mesh_sc%N(dir))+1  ! j0+3
+    buffer(j1) = buffer(j1) + sca*bP3
+
+end subroutine AC_remesh_Mstar6_array
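+
+! Sanity check (added note): for y0 = 0 all weights except b0 vanish and
+! b0 = 1, so a particle sitting exactly on a grid point returns its whole
+! scalar to that point; for any y0 the six weights sum to 1 by construction
+! of b0.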
+
+
+!> M'6 remeshing formula (order is more than 2; JM Ethancelin is working on
+!! determining the exact order) - version for an array of pointers
+!! @author Jean-Baptiste Lagaert, LEGI/LJK
+!!      @param[in]       pos_adim= dimensionless particle position
+!!      @param[in]       translat= translation to convert the dimensionless particle position into the proper array index
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporary buffer for the remeshed scalar field
+subroutine AC_remesh_Mstar6_pter(pos_adim, translat, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    !Input/Output
+    real(WP), intent(in)                                        :: pos_adim, sca
+    type(real_pter), dimension(:), intent(inout)                :: buffer
+    integer, intent(in)                                         :: translat
+    ! Other local variables
+    integer     :: j0                       ! index of the nearest mesh point
+    real(WP)    :: bM, bM2, b0, bP, bP2, bP3! interpolation weights for the particle
+    real(WP)    :: y0                       ! dimensionless distance to the mesh points
+
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! translation to obtain the array index
+    j0 = j0 + translat
+
+    ! Interpolation weights
+    !bM2 =-(((y0+2.)-2)*(5.*(y0+2.)-8.)*((y0+2.)-3.)**3)/24.
+    bM2 = y0*(2. + y0*(-1. + y0*(-9. + (13. - 5.*y0)*y0)))/24.
+    !bM  =(y0+1.-1.)*(y0+1.-2.)*(25.*(y0+1.)**3-114.*(y0+1.)**2+153.*(y0+1.)-48.)/24.
+    bM = y0*(-16. + y0*(16. + y0*(39. + y0*(-64. + 25.*y0))))/24.
+    !bP  =-((1.-y0)-1.)*(25.*(1.-y0)**4-38.*(1.-y0)**3-3.*(1.-y0)**2+12.*(1.-y0)+12)/12.
+    bP = ( y0*(8. + y0*(8. + y0*(33. + y0*(-62. + 25.*y0)))))/12.
+    !bP2 = ((2.-y0)-1.)*((2.-y0)-2.)*(25.*(2.-y0)**3-114.*(2.-y0)**2+153.*(2.-y0)-48.)/24.
+    bP2 = (y0*(-2. + y0*(-1. + y0*(-33. + (61. - 25.*y0)*y0))))/24.
+    !bP3 =-(((3.-y0)-2)*(5.*(3.-y0)-8.)*((3.-y0)-3.)**3)/24.
+    bP3 = (y0**3)*(7. + y0*(5.*y0 - 12.))/24.
+    !b0  =-(y0-1.)*(25.*y0**4-38.*y0**3-3.*y0**2+12.*y0+12)/12.
+    !b0 = (12. + y0**2*(-15. + y0*(-35. + (63. - 25.*y0)*y0)))/12.
+    b0 = 1. - (bM2+bM+bP+bP2+bP3)
+
+    ! remeshing
+    buffer(j0-2)%pter = buffer(j0-2)%pter + sca*bM2
+    buffer(j0-1)%pter = buffer(j0-1)%pter + sca*bM
+    buffer(j0  )%pter = buffer(j0  )%pter + sca*b0
+    buffer(j0+1)%pter = buffer(j0+1)%pter + sca*bP
+    buffer(j0+2)%pter = buffer(j0+2)%pter + sca*bP2
+    buffer(j0+3)%pter = buffer(j0+3)%pter + sca*bP3
+
+end subroutine AC_remesh_Mstar6_pter
+
+
+!> Lambda(4,4) remeshing formula (without correction), order = 4 everywhere - version for an array of reals
+!! @author Jean-Baptiste Lagaert, LEGI/LJK
+!!      @param[in]       dir     = current direction
+!!      @param[in]       pos_adim= dimensionless particle position
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporary buffer for the remeshed scalar field
+subroutine AC_remesh_L4_4_array(dir, pos_adim, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    !Input/Output
+    integer, intent(in)                     :: dir
+    real(WP), intent(in)                    :: pos_adim, sca
+    real(WP), dimension(:), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: j0, j1                   ! indices of the nearest mesh points
+    real(WP)    :: bM, bM2, b0, bP, bP2, bP3! interpolation weights for the particle
+    real(WP)    :: y0                       ! dimensionless distance to the mesh points
+
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! Interpolation weights
+    bM2 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(-46. * y0 + 207.) - 354.) + 273.) - 80.) + 1.) - 2.)- 1.) + 2.)) / 24.
+    bM  = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(230. * y0 - 1035.) +1770.) - 1365.) + 400.) - 4.) + 4.) + 16.) - 16.)) / 24.
+    b0  = (y0* y0*(y0*y0* (y0*(y0*(y0*(y0*(-460.* y0 + 2070.) - 3540.) + 2730.) - 800.) + 6.) - 30.)+ 24.) / 24.
+    bP  = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(460. * y0 - 2070.) + 3540.) - 2730.) + 800.) - 4.) - 4.) + 16.) + 16.)) / 24.
+    !bP2 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0 * (-230. * y0 + 1035.) - 1770.) + 1365.) - 400.) + 1.) + 2.) - 1.) - 2.)) / 24.
+    bP3 = (y0*y0*y0*y0*y0*(y0*(y0 * (y0 * (46. * y0 - 207.) + 354.) - 273.) + 80.)) / 24.
+    bP2 = 1. - (bM2+bM+bP+b0+bP3)
+
+    ! remeshing
+    j1 = modulo(j0-3,mesh_sc%N(dir))+1  ! j0-2
+    buffer(j1) = buffer(j1) + sca*bM2
+    j1 = modulo(j0-2,mesh_sc%N(dir))+1  ! j0-1
+    buffer(j1) = buffer(j1) + sca*bM
+    j1 = modulo(j0-1,mesh_sc%N(dir))+1  ! j0
+    buffer(j1) = buffer(j1) + sca*b0
+    j1 = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
+    buffer(j1) = buffer(j1) + sca*bP
+    j1 = modulo(j0+1,mesh_sc%N(dir))+1  ! j0+2
+    buffer(j1) = buffer(j1) + sca*bP2
+    j1 = modulo(j0+2,mesh_sc%N(dir))+1  ! j0+3
+    buffer(j1) = buffer(j1) + sca*bP3
+
+end subroutine AC_remesh_L4_4_array
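+
+! Design note (added): computing bP2 as 1 minus the sum of the other weights
+! enforces the partition of unity exactly, so the remeshing conserves the
+! scalar up to round-off, and it avoids evaluating one more 9th-degree
+! polynomial; the commented line in the body above keeps the closed form for
+! reference.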
+
+
+!> Lambda(4,4) uncorrected remeshing formula (order 4 everywhere) - version for an array of pointers
+!! @author Jean-Baptiste Lagaert, LEGI/LJK
+!!      @param[in]       pos_adim= dimensionless particle position
+!!      @param[in]       translat= translation to convert the dimensionless particle position into the proper array index
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporary buffer for the remeshed scalar field
+subroutine AC_remesh_L4_4_pter(pos_adim, translat, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    !Input/Output
+    real(WP), intent(in)                                        :: pos_adim, sca
+    type(real_pter), dimension(:), intent(inout)                :: buffer
+    integer, intent(in)                                         :: translat
+    ! Other local variables
+    integer     :: j0                       ! index of the nearest mesh point
+    real(WP)    :: bM, bM2, b0, bP, bP2, bP3! interpolation weights for the particle
+    real(WP)    :: y0                       ! dimensionless distance to the mesh points
+
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! translation to obtain the array index
+    j0 = j0 + translat
+
+    ! Interpolation weights
+    bM2 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(-46. * y0 + 207.) - 354.) + 273.) - 80.) + 1.) - 2.)- 1.) + 2.)) / 24.
+    bM  = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(230. * y0 - 1035.) +1770.) - 1365.) + 400.) - 4.) + 4.) + 16.) - 16.)) / 24.
+    b0  = (y0* y0*(y0*y0* (y0*(y0*(y0*(y0*(-460.* y0 + 2070.) - 3540.) + 2730.) - 800.) + 6.) - 30.)+ 24.) / 24.
+    bP  = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(460. * y0 - 2070.) + 3540.) - 2730.) + 800.) - 4.) - 4.) + 16.) + 16.)) / 24.
+    !bP2 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0 * (-230. * y0 + 1035.) - 1770.) + 1365.) - 400.) + 1.) + 2.) - 1.) - 2.)) / 24.
+    bP3 = (y0*y0*y0*y0*y0*(y0*(y0 * (y0 * (46. * y0 - 207.) + 354.) - 273.) + 80.)) / 24.
+    bP2 = 1. - (bM2+bM+bP+b0+bP3)
+
+    ! remeshing
+    buffer(j0-2)%pter = buffer(j0-2)%pter + sca*bM2
+    buffer(j0-1)%pter = buffer(j0-1)%pter + sca*bM
+    buffer(j0  )%pter = buffer(j0  )%pter + sca*b0
+    buffer(j0+1)%pter = buffer(j0+1)%pter + sca*bP
+    buffer(j0+2)%pter = buffer(j0+2)%pter + sca*bP2
+    buffer(j0+3)%pter = buffer(j0+3)%pter + sca*bP3
+
+end subroutine AC_remesh_L4_4_pter
+
+
+!> Lambda(6,4) remeshing formula (without correction),
+!! order = 6 locally with C4 regularity - version for an array of reals
+!! @author Chloe Mimeau, LJK
+!!      @param[in]       dir     = current direction
+!!      @param[in]       pos_adim= dimensionless particle position
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporary buffer for the remeshed scalar field
+subroutine AC_remesh_L6_4_array(dir, pos_adim, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    !Input/Output
+    integer, intent(in)                     :: dir
+    real(WP), intent(in)                    :: pos_adim, sca
+    real(WP), dimension(:), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: j0, j1            ! indices of the nearest mesh points
+    real(WP)    :: bM3, bM2, bM, b0  ! interpolation weights for the particle
+    real(WP)    :: bP, bP2, bP3, bP4 ! interpolation weights for the particle
+    real(WP)    :: y0                ! dimensionless distance to the mesh points
+
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! Interpolation weights
+    bM3 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(290. * y0 - 1305.) + 2231.) &
+        & - 1718.) + 500.) - 5.) + 15.) + 4.) - 12.)) / 720.
+    bM2 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(-2030. * y0 + 9135.) - 15617.) &
+        & + 12027.) - 3509.) + 60.) - 120.) - 54.) + 108.)) / 720.
+    bM = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(6090. * y0 - 27405.) + 46851.) &
+        & - 36084.) + 10548.) - 195.) + 195.) + 540.) - 540.)) / 720.
+    b0 = (y0*y0*(y0*y0*(y0*(y0*(y0*(y0*(-10150. * y0 + 45675.) - 78085.) &
+        & + 60145.) - 17605.) + 280.) - 980.) + 720.) / 720.
+    bP = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(10150. * y0 - 45675.) + 78085.) &
+        & - 60150.) + 17620.) - 195.) - 195.) + 540.) + 540.)) / 720.
+    bP2 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(-6090. * y0 + 27405.) - 46851.) &
+        & + 36093.) - 10575.) + 60.) + 120.) - 54.) - 108.)) / 720.
+    bP3 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(2030. * y0 - 9135.) + 15617.) &
+        & - 12032.) + 3524.) - 5.) - 15.) + 4.) + 12.)) / 720.
+    bP4 = (y0*y0*y0*y0*y0*(y0*(y0*(y0*(-290. * y0 + 1305.) - 2231.) + 1719.) &
+        & - 503.)) / 720.
+!    b0 = 1. - (bM3+bM2+bM+bP+bP2+bP3+bP4)
+
+    ! remeshing
+    j1 = modulo(j0-4,mesh_sc%N(dir))+1  ! j0-3
+    buffer(j1) = buffer(j1) + sca*bM3
+    j1 = modulo(j0-3,mesh_sc%N(dir))+1  ! j0-2
+    buffer(j1) = buffer(j1) + sca*bM2
+    j1 = modulo(j0-2,mesh_sc%N(dir))+1  ! j0-1
+    buffer(j1) = buffer(j1) + sca*bM
+    j1 = modulo(j0-1,mesh_sc%N(dir))+1  ! j0
+    buffer(j1) = buffer(j1) + sca*b0
+    j1 = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
+    buffer(j1) = buffer(j1) + sca*bP
+    j1 = modulo(j0+1,mesh_sc%N(dir))+1  ! j0+2
+    buffer(j1) = buffer(j1) + sca*bP2
+    j1 = modulo(j0+2,mesh_sc%N(dir))+1  ! j0+3
+    buffer(j1) = buffer(j1) + sca*bP3
+    j1 = modulo(j0+3,mesh_sc%N(dir))+1  ! j0+4
+    buffer(j1) = buffer(j1) + sca*bP4
+
+end subroutine AC_remesh_L6_4_array
+
+
+!> Lambda(6,4) uncorrected remeshing formula (order 6 locally with C4 regularity)
+!! - version for an array of pointers
+!! @author Chloe Mimeau, LJK
+!!      @param[in]       pos_adim= dimensionless particle position
+!!      @param[in]       translat= translation to convert the dimensionless particle position into the proper array index
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporary buffer for the remeshed scalar field
+subroutine AC_remesh_L6_4_pter(pos_adim, translat, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    !Input/Output
+    real(WP), intent(in)                             :: pos_adim, sca
+    type(real_pter), dimension(:), intent(inout)     :: buffer
+    integer, intent(in)                              :: translat
+    ! Other local variables
+    integer     :: j0                ! index of the nearest mesh point
+    real(WP)    :: bM3, bM2, bM, b0  ! interpolation weights for the particle
+    real(WP)    :: bP, bP2, bP3, bP4 ! interpolation weights for the particle
+    real(WP)    :: y0                ! dimensionless distance to the mesh points
+
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! translation to obtain the array index
+    j0 = j0 + translat
+
+    ! Interpolation weights
+    bM3 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(290. * y0 - 1305.) + 2231.) &
+        & - 1718.) + 500.) - 5.) + 15.) + 4.) - 12.)) / 720.
+    bM2 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(-2030. * y0 + 9135.) - 15617.) &
+        & + 12027.) - 3509.) + 60.) - 120.) - 54.) + 108.)) / 720.
+    bM = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(6090. * y0 - 27405.) + 46851.) &
+        & - 36084.) + 10548.) - 195.) + 195.) + 540.) - 540.)) / 720.
+    b0 = (y0*y0*(y0*y0*(y0*(y0*(y0*(y0*(-10150. * y0 + 45675.) - 78085.) &
+        & + 60145.) - 17605.) + 280.) - 980.) + 720.) / 720.
+    bP = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(10150. * y0 - 45675.) + 78085.) &
+        & - 60150.) + 17620.) - 195.) - 195.) + 540.) + 540.)) / 720.
+    bP2 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(-6090. * y0 + 27405.) - 46851.) &
+        & + 36093.) - 10575.) + 60.) + 120.) - 54.) - 108.)) / 720.
+    bP3 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(2030. * y0 - 9135.) + 15617.) &
+        & - 12032.) + 3524.) - 5.) - 15.) + 4.) + 12.)) / 720.
+    bP4 = (y0*y0*y0*y0*y0*(y0*(y0*(y0*(-290. * y0 + 1305.) - 2231.) + 1719.) &
+        & - 503.)) / 720.
+!    b0 = 1. - (bM3+bM2+bM+bP+bP2+bP3+bP4)
+
+
+    ! remeshing
+    buffer(j0-3)%pter = buffer(j0-3)%pter + sca*bM3
+    buffer(j0-2)%pter = buffer(j0-2)%pter + sca*bM2
+    buffer(j0-1)%pter = buffer(j0-1)%pter + sca*bM
+    buffer(j0  )%pter = buffer(j0  )%pter + sca*b0
+    buffer(j0+1)%pter = buffer(j0+1)%pter + sca*bP
+    buffer(j0+2)%pter = buffer(j0+2)%pter + sca*bP2
+    buffer(j0+3)%pter = buffer(j0+3)%pter + sca*bP3
+    buffer(j0+4)%pter = buffer(j0+4)%pter + sca*bP4
+
+end subroutine AC_remesh_L6_4_pter
+
+
+!> Lambda(6,6) remeshing formula (without correction), order = 6 everywhere - version for an array of reals
+!! @author Jean-Baptiste Lagaert, LEGI/LJK
+!!      @param[in]       dir     = current direction
+!!      @param[in]       pos_adim= dimensionless particle position
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporary buffer for the remeshed scalar field
+subroutine AC_remesh_L6_6_array(dir, pos_adim, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    !Input/Output
+    integer, intent(in)                     :: dir
+    real(WP), intent(in)                    :: pos_adim, sca
+    real(WP), dimension(:), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: j0, j1                   ! indices of the nearest mesh points
+    real(WP)    :: bM, bM2, bM3, b0, bP, bP2, bP3, bP4! interpolation weights for the particle
+    real(WP)    :: y0                       ! dimensionless distance to the mesh points
+
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! Interpolation weights
+    bM3 = (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (3604. * y0 - 23426.) + 63866.) &
+             & - 93577.) + 77815.) - 34869.) + 6587.) + 1.) - 3.) - 5.) + 15.) + &
+             & 4.) - 12.)) / 720.
+    bM2 = (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (-25228. * y0 + 163982.) - 447062.) &
+             & + 655039.) - 544705.) + 244083.) - 46109.) - 6.) + 12.) + 60.) - &
+             & 120.) - 54.) + 108.)) / 720.
+    bM  = (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (75684. * y0 - 491946.) + 1341186.) &
+             & - 1965117.) + 1634115.) - 732249.) + 138327.) + 15.) - 15.) - 195.) &
+             & + 195.) + 540.) - 540.)) / 720.
+    b0  = (y0 * y0 * (y0 * y0 * (y0 * y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (-126140. * y0 + 819910.) - 2235310.) &
+             & + 3275195.) - 2723525.) + 1220415.) - 230545.) - 20.) + 280.) - &
+             & 980.) + 720.) / 720.
+    !bP  = (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (126140. * y0 - 819910.) + 2235310.) &
+    !         & - 3275195.) + 2723525.) - 1220415.) + 230545.) + 15.) + 15.) - &
+    !         & 195.) - 195.) + 540.) + 540.)) / 720.
+    bP2 = (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (-75684. * y0 + 491946.) - 1341186.) &
+             & + 1965117.) - 1634115.) + 732249.) - 138327.) - 6.) - 12.) + 60.) + &
+             & 120.) - 54.) - 108.)) / 720.
+    bP3 = (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (25228. * y0 - 163982.) + 447062.) &
+             & - 655039.) + 544705.) - 244083.) + 46109.) + 1.) + 3.) - 5.) - 15.) &
+             & + 4.) + 12.)) / 720.
+    bP4 = (y0 * y0 * y0 * y0 * y0 * y0 * y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (-3604. * y0 + &
+             & 23426.) - 63866.) + 93577.) - 77815.) + 34869.) - 6587.)) / 720.
+    bP = 1. - (bM3+bM2+bM+b0+bP2+bP3+bP4)
+
+    ! remeshing
+    j1 = modulo(j0-4,mesh_sc%N(dir))+1  ! j0-3
+    buffer(j1) = buffer(j1) + sca*bM3
+    j1 = modulo(j0-3,mesh_sc%N(dir))+1  ! j0-2
+    buffer(j1) = buffer(j1) + sca*bM2
+    j1 = modulo(j0-2,mesh_sc%N(dir))+1  ! j0-1
+    buffer(j1) = buffer(j1) + sca*bM
+    j1 = modulo(j0-1,mesh_sc%N(dir))+1  ! j0
+    buffer(j1) = buffer(j1) + sca*b0
+    j1 = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
+    buffer(j1) = buffer(j1) + sca*bP
+    j1 = modulo(j0+1,mesh_sc%N(dir))+1  ! j0+2
+    buffer(j1) = buffer(j1) + sca*bP2
+    j1 = modulo(j0+2,mesh_sc%N(dir))+1  ! j0+3
+    buffer(j1) = buffer(j1) + sca*bP3
+    j1 = modulo(j0+3,mesh_sc%N(dir))+1  ! j0+4
+    buffer(j1) = buffer(j1) + sca*bP4
+
+end subroutine AC_remesh_L6_6_array
+
+
+!> Lambda(6,6) uncorrected remeshing formula (order 6 everywhere) - version for an array of pointers
+!! @author Jean-Baptiste Lagaert, LEGI/LJK
+!!      @param[in]       pos_adim= dimensionless particle position
+!!      @param[in]       translat= translation to convert the dimensionless particle position into the proper array index
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporary buffer for the remeshed scalar field
+subroutine AC_remesh_L6_6_pter(pos_adim, translat, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    !Input/Output
+    real(WP), intent(in)                                        :: pos_adim, sca
+    type(real_pter), dimension(:), intent(inout)                :: buffer
+    integer, intent(in)                                         :: translat
+    ! Other local variables
+    integer     :: j0                       ! index of the nearest mesh point
+    real(WP)    :: bM, bM2, bM3, b0, bP, bP2, bP3, bP4! interpolation weights for the particle
+    real(WP)    :: y0                       ! dimensionless distance to the mesh points
+
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! translation to obtain the array index
+    j0 = j0 + translat
+
+    ! Interpolation weights
+    bM3 = (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (3604. * y0 - 23426.) + 63866.) &
+             & - 93577.) + 77815.) - 34869.) + 6587.) + 1.) - 3.) - 5.) + 15.) + &
+             & 4.) - 12.)) / 720.
+    bM2 = (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (-25228. * y0 + 163982.) - 447062.) &
+             & + 655039.) - 544705.) + 244083.) - 46109.) - 6.) + 12.) + 60.) - &
+             & 120.) - 54.) + 108.)) / 720.
+    bM  = (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (75684. * y0 - 491946.) + 1341186.) &
+             & - 1965117.) + 1634115.) - 732249.) + 138327.) + 15.) - 15.) - 195.) &
+             & + 195.) + 540.) - 540.)) / 720.
+    b0  = (y0 * y0 * (y0 * y0 * (y0 * y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (-126140. * y0 + 819910.) - 2235310.) &
+             & + 3275195.) - 2723525.) + 1220415.) - 230545.) - 20.) + 280.) - &
+             & 980.) + 720.) / 720.
+    !bP  = (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (126140. * y0 - 819910.) + 2235310.) &
+    !         & - 3275195.) + 2723525.) - 1220415.) + 230545.) + 15.) + 15.) - &
+    !         & 195.) - 195.) + 540.) + 540.)) / 720.
+    bP2 = (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (-75684. * y0 + 491946.) - 1341186.) &
+             & + 1965117.) - 1634115.) + 732249.) - 138327.) - 6.) - 12.) + 60.) + &
+             & 120.) - 54.) - 108.)) / 720.
+    bP3 = (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (25228. * y0 - 163982.) + 447062.) &
+             & - 655039.) + 544705.) - 244083.) + 46109.) + 1.) + 3.) - 5.) - 15.) &
+             & + 4.) + 12.)) / 720.
+    bP4 = (y0 * y0 * y0 * y0 * y0 * y0 * y0 * (y0 * (y0 * (y0 * (y0 * (y0 * (-3604. * y0 + &
+             & 23426.) - 63866.) + 93577.) - 77815.) + 34869.) - 6587.)) / 720.
+    bP = 1. - (bM3+bM2+bM+b0+bP2+bP3+bP4)
+
+    ! remeshing
+    buffer(j0-3)%pter = buffer(j0-3)%pter + sca*bM3
+    buffer(j0-2)%pter = buffer(j0-2)%pter + sca*bM2
+    buffer(j0-1)%pter = buffer(j0-1)%pter + sca*bM
+    buffer(j0  )%pter = buffer(j0  )%pter + sca*b0
+    buffer(j0+1)%pter = buffer(j0+1)%pter + sca*bP
+    buffer(j0+2)%pter = buffer(j0+2)%pter + sca*bP2
+    buffer(j0+3)%pter = buffer(j0+3)%pter + sca*bP3
+    buffer(j0+4)%pter = buffer(j0+4)%pter + sca*bP4
+
+end subroutine AC_remesh_L6_6_pter
+
+
+!> Lambda(8,4) remeshing formula (without correction),
+!! order = 8 locally with C4 regularity - version for an array of reals
+!! @author Chloe Mimeau, LJK
+!!      @param[in]       dir     = current direction
+!!      @param[in]       pos_adim= dimensionless particle position
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporary buffer for the remeshed scalar field
+subroutine AC_remesh_L8_4_array(dir, pos_adim, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    !Input/Output
+    integer, intent(in)                     :: dir
+    real(WP), intent(in)                    :: pos_adim, sca
+    real(WP), dimension(:), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: j0, j1                 ! indices of the nearest mesh points
+    real(WP)    :: bM4, bM3, bM2, bM, b0  ! interpolation weights for the particle
+    real(WP)    :: bP, bP2, bP3, bP4, bP5 ! interpolation weights for the particle
+    real(WP)    :: y0                     ! dimensionless distance to the mesh points
+
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! Interpolation weights
+    bM4 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(-3569. * y0 + 16061.) &
+        & - 27454.) + 21126.) - 6125.) + 49.) - 196.) - 36.) + 144.)) / 40320.
+    bM3 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(32121. * y0 - 144548.) &
+        & + 247074.) - 190092.) + 55125.) - 672.) + 2016.) + 512.) &
+        & - 1536.)) / 40320.
+    bM2 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(-128484. * y0 + 578188.) &
+        & - 988256.) + 760312.) - 221060.) + 4732.) - 9464.) - 4032.) &
+        & + 8064.)) / 40320.
+    bM = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(299796. * y0 - 1349096.) &
+        & + 2305856.) - 1774136.) + 517580.) - 13664.) + 13664.) &
+        & + 32256.) - 32256.)) / 40320.
+    b0 = (y0*y0*(y0*y0*(y0*(y0*(y0*(y0*(-449694. * y0 + 2023630.) &
+        & - 3458700.) + 2661540.) - 778806.) + 19110.) - 57400.) &
+        & + 40320.) / 40320.
+    bP = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(449694. * y0 - 2023616.) &
+        & + 3458644.) - 2662016.) + 780430.) - 13664.) - 13664.) &
+        & + 32256.) + 32256.)) / 40320.
+    bP2 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(-299796. * y0 + 1349068.) &
+        & - 2305744.) + 1775032.) - 520660.) + 4732.) + 9464.) - 4032.) &
+        & - 8064.)) / 40320.
+    bP3 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(128484. * y0 - 578168.) &
+        & + 988176.) - 760872.) + 223020.) - 672.) - 2016.) + 512.) &
+        & + 1536.)) / 40320.
+    bP4 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(-32121. * y0 + 144541.) &
+        & - 247046.) + 190246.) - 55685.) + 49.) + 196.) - 36.) &
+        & - 144.)) / 40320.
+    bP5 = (y0*y0*y0*y0*y0*(y0*(y0*(y0*(3569. * y0 - 16060.) + 27450.) &
+        & - 21140.) + 6181.)) / 40320.
+!    b0 = 1. - (bM4+bM3+bM2+bM+bP+bP2+bP3+bP4+bP5)
+
+    ! remeshing
+    j1 = modulo(j0-5,mesh_sc%N(dir))+1  ! j0-4
+    buffer(j1) = buffer(j1) + sca*bM4
+    j1 = modulo(j0-4,mesh_sc%N(dir))+1  ! j0-3
+    buffer(j1) = buffer(j1) + sca*bM3
+    j1 = modulo(j0-3,mesh_sc%N(dir))+1  ! j0-2
+    buffer(j1) = buffer(j1) + sca*bM2
+    j1 = modulo(j0-2,mesh_sc%N(dir))+1  ! j0-1
+    buffer(j1) = buffer(j1) + sca*bM
+    j1 = modulo(j0-1,mesh_sc%N(dir))+1  ! j0
+    buffer(j1) = buffer(j1) + sca*b0
+    j1 = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
+    buffer(j1) = buffer(j1) + sca*bP
+    j1 = modulo(j0+1,mesh_sc%N(dir))+1  ! j0+2
+    buffer(j1) = buffer(j1) + sca*bP2
+    j1 = modulo(j0+2,mesh_sc%N(dir))+1  ! j0+3
+    buffer(j1) = buffer(j1) + sca*bP3
+    j1 = modulo(j0+3,mesh_sc%N(dir))+1  ! j0+4
+    buffer(j1) = buffer(j1) + sca*bP4
+    j1 = modulo(j0+4,mesh_sc%N(dir))+1  ! j0+5
+    buffer(j1) = buffer(j1) + sca*bP5
+
+
+end subroutine AC_remesh_L8_4_array
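+
+!> Hedged sanity-check sketch (illustration only, not called by the solver):
+!! the ten Lambda(8,4) weights form a partition of unity, so remeshing a
+!! single particle must conserve the advected scalar. The direction argument
+!! and the sample position 10.3 below are arbitrary assumptions.
+subroutine AC_remesh_L8_4_check(dir)
+
+    use cart_topology   ! provides mesh_sc
+
+    integer, intent(in)                 :: dir  ! direction to test
+    real(WP), dimension(mesh_sc%N(dir)) :: buf  ! periodic remeshing buffer
+
+    buf = 0.0_WP
+    ! remesh one particle carrying a unit scalar
+    call AC_remesh_L8_4_array(dir, 10.3_WP, 1.0_WP, buf)
+    ! the buffer must sum to the remeshed scalar, up to round-off
+    if (abs(sum(buf) - 1.0_WP) > 1.0e-10_WP) then
+        write(*,*) 'Lambda(8,4) conservation check failed: ', sum(buf)
+    end if
+
+end subroutine AC_remesh_L8_4_check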
+
+
+!> Lambda(8,4) uncorrected remeshing formula (order 8 locally with C4 regularity)
+!! - version for an array of pointers
+!! @author Chloe Mimeau, LJK
+!!      @param[in]       pos_adim= dimensionless particle position
+!!      @param[in]       translat= translation converting the dimensionless particle position into the proper array index
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporary remeshed scalar field
+subroutine AC_remesh_L8_4_pter(pos_adim, translat, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    real(WP), intent(in)                             :: pos_adim, sca
+    type(real_pter), dimension(:), intent(inout)     :: buffer
+    integer, intent(in)                              :: translat
+    ! Other local variables
+    integer     :: j0                     ! index of the nearest mesh point
+    real(WP)    :: bM4, bM3, bM2, bM, b0  ! interpolation weights for the particle
+    real(WP)    :: bP, bP2, bP3, bP4, bP5 ! interpolation weights for the particle
+    real(WP)    :: y0                     ! dimensionless distance to mesh points
+
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! translation to obtain the array index
+    j0 = j0 + translat
+
+    ! Interpolation weights
+    bM4 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(-3569. * y0 + 16061.) &
+        & - 27454.) + 21126.) - 6125.) + 49.) - 196.) - 36.) + 144.)) / 40320.
+    bM3 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(32121. * y0 - 144548.) &
+        & + 247074.) - 190092.) + 55125.) - 672.) + 2016.) + 512.) &
+        & - 1536.)) / 40320.
+    bM2 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(-128484. * y0 + 578188.) &
+        & - 988256.) + 760312.) - 221060.) + 4732.) - 9464.) - 4032.) &
+        & + 8064.)) / 40320.
+    bM = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(299796. * y0 - 1349096.) &
+        & + 2305856.) - 1774136.) + 517580.) - 13664.) + 13664.) &
+        & + 32256.) - 32256.)) / 40320.
+    b0 = (y0*y0*(y0*y0*(y0*(y0*(y0*(y0*(-449694. * y0 + 2023630.) &
+        & - 3458700.) + 2661540.) - 778806.) + 19110.) - 57400.) &
+        & + 40320.) / 40320.
+    bP = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(449694. * y0 - 2023616.) &
+        & + 3458644.) - 2662016.) + 780430.) - 13664.) - 13664.) &
+        & + 32256.) + 32256.)) / 40320.
+    bP2 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(-299796. * y0 + 1349068.) &
+        & - 2305744.) + 1775032.) - 520660.) + 4732.) + 9464.) - 4032.) &
+        & - 8064.)) / 40320.
+    bP3 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(128484. * y0 - 578168.) &
+        & + 988176.) - 760872.) + 223020.) - 672.) - 2016.) + 512.) &
+        & + 1536.)) / 40320.
+    bP4 = (y0*(y0*(y0*(y0*(y0*(y0*(y0*(y0*(-32121. * y0 + 144541.) &
+        & - 247046.) + 190246.) - 55685.) + 49.) + 196.) - 36.) &
+        & - 144.)) / 40320.
+    bP5 = (y0*y0*y0*y0*y0*(y0*(y0*(y0*(3569. * y0 - 16060.) + 27450.) &
+        & - 21140.) + 6181.)) / 40320.
+!    b0 = 1. - (bM4+bM3+bM2+bM+bP+bP2+bP3+bP4+bP5)
+
+    ! remeshing
+    buffer(j0-4)%pter = buffer(j0-4)%pter + sca*bM4
+    buffer(j0-3)%pter = buffer(j0-3)%pter + sca*bM3
+    buffer(j0-2)%pter = buffer(j0-2)%pter + sca*bM2
+    buffer(j0-1)%pter = buffer(j0-1)%pter + sca*bM
+    buffer(j0  )%pter = buffer(j0  )%pter + sca*b0
+    buffer(j0+1)%pter = buffer(j0+1)%pter + sca*bP
+    buffer(j0+2)%pter = buffer(j0+2)%pter + sca*bP2
+    buffer(j0+3)%pter = buffer(j0+3)%pter + sca*bP3
+    buffer(j0+4)%pter = buffer(j0+4)%pter + sca*bP4
+    buffer(j0+5)%pter = buffer(j0+5)%pter + sca*bP5
+
+end subroutine AC_remesh_L8_4_pter
+
+
+!> M'8 remeshing formula - version for an array of reals.
+!! @author Jean-Baptiste Lagaert, LEGI/LJK
+!!      @param[in]       dir     = current direction
+!!      @param[in]       pos_adim= dimensionless particle position
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporary remeshed scalar field
+subroutine AC_remesh_Mprime8_array(dir, pos_adim, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    integer, intent(in)                      :: dir
+    real(WP), intent(in)                     :: pos_adim, sca
+    real(WP), dimension(:), intent(inout)    :: buffer
+    ! Other local variables
+    integer     :: j0, j1                   ! indices of the nearest mesh points
+    real(WP)    :: y0                       ! dimensionless distance to mesh points
+    real(WP)    :: bM, bM2, bM3, b0, bP, bP2, bP3, bP4  ! interpolation weights for the particle
+
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! Interpolation weights
+    ! M'8 = 15/8*M8 + 9/8 * x * M'8 + 1/8 * x^2 * M''8
+    ! y8 = y1 + 4
+    ! bP4=(y0**7)/2688.-(4-y0)*(y0**6)/640.+((4-y0)**2)*(y0**5)/960
+    bP4=(y0**5)*(y0*(y0/336. - 7./480.) + 1./60.)
+    ! bM3=(1-y0)**7/2688.-(y0+3)*(1-y0)**6/640.+(y0+3)**2*(1-y0)**5/960
+    bM3=y0*(y0*(y0*(y0*(y0*(y0*(-y0/336. + 1./160.) + 1./120.)    &
+        & - 1./32.) + 1./48.) + 1./96.) - 1./60.) + 17./3360.
+    ! bP3=(y0+1)**7/2688.-(3-y0)*(y0+1)**6/640.+(3-y0)**2*(y0+1)**5/960
+    !     -y0**7/336+(3-y0)*y0**6/80.-(3-y0)**2*y0**5/120.
+    bP3=y0*(y0*(y0*(y0*(y0*(y0*(-y0/48. + 3./32.) - 1./12.)       &
+        & - 1./32.) - 1./48.) + 1./96.) + 1./60.) + 17./3360.
+    ! bM2=(2-y0)**7/2688.-(y0+2)*(2-y0)**6/640.+(y0+2)**2*(2-y0)**5/960
+    !     -xx2**7/336+(y0+2)*xx2**6/80.-(y0+2)**2*xx2**5/120.
+    bM2=y0*(y0*(y0*(y0*(y0*(y0*(y0/48. - 5./96.) - 1./24.)        &
+        & + 11./48.) - 1./6.) - 5./48.) + 3./20.) - 17./560.
+    ! bP2=(y0+2)**7/2688.-(2-y0)*(y0+2)**6/640.+(2-y0)**2*(y0+2)**5/960
+    !       -(y0+1)**7/336+(2-y0)*(y0+1)**6/80.-(2-y0)**2*(y0+1)**5/120.
+    !       +y0**7/96.-7.*(2-y0)*y0**6/160.+7.*(2-y0)**2*y0**5/240.
+    bP2=y0*(y0*(y0*(y0*(y0*(y0*(y0/16. - 41./160.) + 19./120.)    &
+        & + 11./48.) + 1./6.) - 5./48.) - 3./20.) - 17./560.
+    ! bM=(3-y0)**7/2688.-(y0+1)*(3-y0)**6/640.+(y0+1)**2*(3-y0)**5/960
+    !       -(2-y0)**7/336+(y0+1)*(2-y0)**6/80.-(y0+1)**2*(2-y0)**5/120.
+    !       +(1-y0)**7/96.-7.*(y0+1)*(1-y0)**6/160.+7.*(y0+1)**2*(1-y0)**5/240.
+    bM=y0*(y0*(y0*(y0*(y0*(y0*(-y0/16. + 29./160.) + 1./15.)     &
+        & - 61./96.) + 13./48.) + 79./96.) - 3./4.) + 17./224.
+    ! bP=(y0+3)**7/2688.-(1-y0)*(y0+3)**6/640.+(1-y0)**2*(y0+3)**5/960
+    !       -(y0+2)**7/336+(1-y0)*(y0+2)**6/80.-(1-y0)**2*(y0+2)**5/120.
+    !       +(y0+1)**7/96.-7.*(1-y0)*(y0+1)**6/160.+7.*(1-y0)**2*(y0+1)**5/240.
+    !       -y0**7/48.+7.*(1-y0)*y0**6/80.-7.*(1-y0)**2*y0**5/120.
+    ! bP=y0*(y0*(y0*(y0*(y0*(y0*(-5.*y0/48. + 37./96.) - 1./8.)    &
+    !    & - 61./96.) - 13./48.) + 79./96.) + 3./4.) + 17./224.
+    ! See below : bP = 1 - (bM3+bM2+bM+b0+bP2+bP3+bP4)
+    ! b0=(4-y0)**7/2688.-y0*(4-y0)**6/640.+y0**2*(4-y0)**5/960
+    !       -(3-y0)**7/336+y0*(3-y0)**6/80.-y0**2*(3-y0)**5/120.
+    !       +(2-y0)**7/96.-7.*y0*(2-y0)**6/160.+7.*y0**2*(2-y0)**5/240.
+    !       -(1-y0)**7/48.+7.*y0*(1-y0)**6/80.-7.*y0**2*(1-y0)**5/120.
+    b0=y0**2*((y0**2)*(y0**2*(5.*y0/48. - 11./32.) + 7./8.)     &
+        & - 35./24.) + 151./168.
+
+    bP = 1. - bM3 - bM2 - bM - b0 - bP2 - bP3 - bP4
+
+    ! remeshing
+    j1 = modulo(j0-4,mesh_sc%N(dir))+1  ! j0-3
+    buffer(j1) = buffer(j1) + sca*bM3
+    j1 = modulo(j0-3,mesh_sc%N(dir))+1  ! j0-2
+    buffer(j1) = buffer(j1) + sca*bM2
+    j1 = modulo(j0-2,mesh_sc%N(dir))+1  ! j0-1
+    buffer(j1) = buffer(j1) + sca*bM
+    j1 = modulo(j0-1,mesh_sc%N(dir))+1  ! j0
+    buffer(j1) = buffer(j1) + sca*b0
+    j1 = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
+    buffer(j1) = buffer(j1) + sca*bP
+    j1 = modulo(j0+1,mesh_sc%N(dir))+1  ! j0+2
+    buffer(j1) = buffer(j1) + sca*bP2
+    j1 = modulo(j0+2,mesh_sc%N(dir))+1  ! j0+3
+    buffer(j1) = buffer(j1) + sca*bP3
+    j1 = modulo(j0+3,mesh_sc%N(dir))+1  ! j0+4
+    buffer(j1) = buffer(j1) + sca*bP4
+
+end subroutine AC_remesh_Mprime8_array
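+
+!> Hedged illustration (not called by the solver): the Horner forms above are
+!! expansions of the factored expressions kept in the comments. This sketch
+!! compares both forms of bM3 at one arbitrary sample point.
+subroutine AC_remesh_Mprime8_weight_check()
+
+    real(WP) :: y0, horner, factored
+
+    y0 = 0.37_WP
+    ! Horner form, as used in AC_remesh_Mprime8_array
+    horner = y0*(y0*(y0*(y0*(y0*(y0*(-y0/336. + 1./160.) + 1./120.)    &
+        & - 1./32.) + 1./48.) + 1./96.) - 1./60.) + 17./3360.
+    ! factored form from the derivation comment above bM3
+    factored = (1.-y0)**7/2688. - (y0+3.)*(1.-y0)**6/640.              &
+        & + ((y0+3.)**2)*(1.-y0)**5/960.
+    if (abs(horner - factored) > 1.0e-6_WP) then
+        write(*,*) 'M''8 bM3 expansion check failed: ', horner, factored
+    end if
+
+end subroutine AC_remesh_Mprime8_weight_check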
+
+
+!> M'8 remeshing formula - version for an array of pointers.
+!! @author Jean-Baptiste Lagaert, LEGI/LJK
+!!      @param[in]       pos_adim= dimensionless particle position
+!!      @param[in]       translat= translation converting the dimensionless particle position into the proper array index
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporary remeshed scalar field
+subroutine AC_remesh_Mprime8_pter(pos_adim, translat, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    real(WP), intent(in)                            :: pos_adim, sca
+    type(real_pter), dimension(:), intent(inout)    :: buffer
+    integer, intent(in)                             :: translat
+    ! Other local variables
+    integer     :: j0                       ! index of the nearest mesh point
+    real(WP)    :: y0                       ! dimensionless distance to mesh points
+    real(WP)    :: bM, bM2, bM3, b0, bP, bP2, bP3, bP4  ! interpolation weights for the particle
+
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! translation to obtain the array index
+    j0 = j0 + translat
+
+    ! Interpolation weights
+    ! M'8 = 15/8*M8 + 9/8 * x * M'8 + 1/8 * x^2 * M''8
+    ! y8 = y1 + 4
+    ! bP4=(y0**7)/2688.-(4-y0)*(y0**6)/640.+((4-y0)**2)*(y0**5)/960
+    bP4=(y0**5)*(y0*(y0/336. - 7./480.) + 1./60.)
+    ! bM3=(1-y0)**7/2688.-(y0+3)*(1-y0)**6/640.+(y0+3)**2*(1-y0)**5/960
+    bM3=y0*(y0*(y0*(y0*(y0*(y0*(-y0/336. + 1./160.) + 1./120.)    &
+        & - 1./32.) + 1./48.) + 1./96.) - 1./60.) + 17./3360.
+    ! bP3=(y0+1)**7/2688.-(3-y0)*(y0+1)**6/640.+(3-y0)**2*(y0+1)**5/960
+    !     -y0**7/336+(3-y0)*y0**6/80.-(3-y0)**2*y0**5/120.
+    bP3=y0*(y0*(y0*(y0*(y0*(y0*(-y0/48. + 3./32.) - 1./12.)       &
+        & - 1./32.) - 1./48.) + 1./96.) + 1./60.) + 17./3360.
+    ! bM2=(2-y0)**7/2688.-(y0+2)*(2-y0)**6/640.+(y0+2)**2*(2-y0)**5/960
+    !     -xx2**7/336+(y0+2)*xx2**6/80.-(y0+2)**2*xx2**5/120.
+    bM2=y0*(y0*(y0*(y0*(y0*(y0*(y0/48. - 5./96.) - 1./24.)        &
+        & + 11./48.) - 1./6.) - 5./48.) + 3./20.) - 17./560.
+    ! bP2=(y0+2)**7/2688.-(2-y0)*(y0+2)**6/640.+(2-y0)**2*(y0+2)**5/960
+    !       -(y0+1)**7/336+(2-y0)*(y0+1)**6/80.-(2-y0)**2*(y0+1)**5/120.
+    !       +y0**7/96.-7.*(2-y0)*y0**6/160.+7.*(2-y0)**2*y0**5/240.
+    bP2=y0*(y0*(y0*(y0*(y0*(y0*(y0/16. - 41./160.) + 19./120.)    &
+        & + 11./48.) + 1./6.) - 5./48.) - 3./20.) - 17./560.
+    ! bM=(3-y0)**7/2688.-(y0+1)*(3-y0)**6/640.+(y0+1)**2*(3-y0)**5/960
+    !       -(2-y0)**7/336+(y0+1)*(2-y0)**6/80.-(y0+1)**2*(2-y0)**5/120.
+    !       +(1-y0)**7/96.-7.*(y0+1)*(1-y0)**6/160.+7.*(y0+1)**2*(1-y0)**5/240.
+    bM=y0*(y0*(y0*(y0*(y0*(y0*(-y0/16. + 29./160.) + 1./15.)     &
+        & - 61./96.) + 13./48.) + 79./96.) - 3./4.) + 17./224.
+    ! bP=(y0+3)**7/2688.-(1-y0)*(y0+3)**6/640.+(1-y0)**2*(y0+3)**5/960
+    !       -(y0+2)**7/336+(1-y0)*(y0+2)**6/80.-(1-y0)**2*(y0+2)**5/120.
+    !       +(y0+1)**7/96.-7.*(1-y0)*(y0+1)**6/160.+7.*(1-y0)**2*(y0+1)**5/240.
+    !       -y0**7/48.+7.*(1-y0)*y0**6/80.-7.*(1-y0)**2*y0**5/120.
+    ! bP=y0*(y0*(y0*(y0*(y0*(y0*(-5.*y0/48. + 37./96.) - 1./8.)    &
+    !    & - 61./96.) - 13./48.) + 79./96.) + 3./4.) + 17./224.
+    ! See below : bP = 1 - (bM3+bM2+bM+b0+bP2+bP3+bP4)
+    ! b0=(4-y0)**7/2688.-y0*(4-y0)**6/640.+y0**2*(4-y0)**5/960
+    !       -(3-y0)**7/336+y0*(3-y0)**6/80.-y0**2*(3-y0)**5/120.
+    !       +(2-y0)**7/96.-7.*y0*(2-y0)**6/160.+7.*y0**2*(2-y0)**5/240.
+    !       -(1-y0)**7/48.+7.*y0*(1-y0)**6/80.-7.*y0**2*(1-y0)**5/120.
+    b0=y0**2*((y0**2)*(y0**2*(5.*y0/48. - 11./32.) + 7./8.)     &
+        & - 35./24.) + 151./168.
+
+    bP = 1. - bM3 - bM2 - bM - b0 - bP2 - bP3 - bP4
+
+    ! remeshing
+    buffer(j0-3)%pter = buffer(j0-3)%pter + sca*bM3
+    buffer(j0-2)%pter = buffer(j0-2)%pter + sca*bM2
+    buffer(j0-1)%pter = buffer(j0-1)%pter + sca*bM
+    buffer(j0  )%pter = buffer(j0  )%pter + sca*b0
+    buffer(j0+1)%pter = buffer(j0+1)%pter + sca*bP
+    buffer(j0+2)%pter = buffer(j0+2)%pter + sca*bP2
+    buffer(j0+3)%pter = buffer(j0+3)%pter + sca*bP3
+    buffer(j0+4)%pter = buffer(j0+4)%pter + sca*bP4
+
+end subroutine AC_remesh_Mprime8_pter
+
+
+end module advec_remeshing_Mprime
+!> @}
diff --git a/HySoP/src/scalesReduced/particles/advec_remesh_lambda.f90 b/HySoP/src/scalesReduced/particles/advec_remesh_lambda.f90
new file mode 100644
index 0000000000000000000000000000000000000000..f93168dcd3c49bb40d451401af56fc1252d6d67a
--- /dev/null
+++ b/HySoP/src/scalesReduced/particles/advec_remesh_lambda.f90
@@ -0,0 +1,2048 @@
+!USEFORTEST advec
+!> @addtogroup part
+
+!------------------------------------------------------------------------------
+!
+!
+!       ===================================================================
+!       ====================     Remesh particles      ====================
+!       ===================================================================
+!
+! MODULE: advec_remeshing_lambda
+!
+!
+! DESCRIPTION:
+!> This module gathers all the remeshing formulae of type "corrected lambda".
+!! These interpolation polynomials redistribute the particles on the mesh
+!! grid at each iteration.
+!! @details
+!! It provides lambda 2 corrected, lambda 4 corrected and limited lambda 2
+!! corrected formulae.
+!!     The remeshing formulae of type "corrected lambda" are designed for
+!! large time steps. They are based on the lambda formulae. The stability
+!! condition does not involve the CFL number but only the velocity
+!! gradient: dt < constant/max|gradient(velocity)|.
+!!     Note that such remeshing formulae involve different cases, depending
+!! on the variation of the local CFL number. Particles are therefore gathered
+!! by blocks, and an "if structure" (actually rather a "select case") is
+!! applied to match each block to the right case. Only the M' formulae (see
+!! advec_remesh_Mprime) avoid such an "if".
+!!     This module also provides some wrappers to remesh a complete line
+!! of particles (with the different formulae), either into an array of
+!! reals or into an array of pointers to reals. In order to gather
+!! communications between different lines of particles, it is better to
+!! use a contiguous memory space for the mesh points which belong to the
+!! same process, and thus to use an array of pointers to deal with this easily.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advec_remeshing_lambda
+
+    use structure_tools
+    use advec_common_line
+
+    implicit none
+
+    ! #############################
+    ! ########## Header ##########
+    ! #############################
+
+    ! ===== Public procedures =====
+    ! "Line remeshing" wrapper (they remesh a complete line of particle using
+    ! the adapted interpolation polynom)
+    procedure(AC_remesh_lambda2corrected_array), pointer, public ::  AC_remesh_lambda_array => null()    !> Generic wrapper to remesh a line of particles into an array of reals
+    procedure(AC_remesh_lambda2corrected_pter) , pointer, public ::  AC_remesh_lambda_pter  => null()    !> Generic wrapper to remesh a line of particles into an array of pointers
+    ! Line remeshing for each corrected lambda scheme.
+    public                              :: AC_remesh_lambda2corrected_pter
+    public                              :: AC_remesh_lambda4corrected_pter
+    public                              :: AC_remesh_lambda2corrected_array
+    public                              :: AC_remesh_lambda4corrected_array
+    ! To get the right "line remeshing" wrapper
+!   !public                              :: AC_remesh_get_pointer
+
+    ! ===== Private procedures =====
+    !----- Order 2 remeshing formula -----
+    ! Interface
+    private                             :: AC_remesh_O2        ! lambda 2 remeshing formula (for left or center block - no correction)
+    private                             :: AC_remesh_tag_CL    ! corrected formula for tagged particles : transition from C to L block.
+    private                             :: AC_remesh_tag_LC    ! corrected formula for tagged particles : transition from L to C block
+    ! Function used by the interfaces
+    private                             :: AC_remesh_O2_array     ! lambda 2 remeshing formula (for left or center block - no correction)
+    private                             :: AC_remesh_O2_pter      ! lambda 2 remeshing formula (for left or center block - no correction)
+    private                             :: AC_remesh_tag_CL_array   ! corrected formula for tagged particles : transition from C to L block.
+    private                             :: AC_remesh_tag_CL_pter    ! corrected formula for tagged particles : transition from C to L block.
+    !----- Order 4 remeshing formula -----
+    ! Interface
+    private                             :: AC_remesh_O4_left   ! left remeshing formula
+    private                             :: AC_remesh_O4_center ! centered remeshing formula
+    private                             :: AC_remesh_O4_tag_CL ! corrected formula for tagged particles : transition from C to L block.
+    private                             :: AC_remesh_O4_tag_LC ! corrected formula for tagged particles : transition from L to C block
+    ! Function used by the interfaces
+    private                             :: AC_remesh_O4_left_array  ! left remeshing formula - array of real
+    private                             :: AC_remesh_O4_left_pter   ! left remeshing formula - array of pointer
+    private                             :: AC_remesh_O4_center_array! centered remeshing formula
+    private                             :: AC_remesh_O4_center_pter ! centered remeshing formula
+    private                             :: AC_remesh_O4_tag_CL_array! corrected formula for tagged particles : transition from C to L block.
+    private                             :: AC_remesh_O4_tag_CL_pter ! corrected formula for tagged particles : transition from C to L block.
+    private                             :: AC_remesh_O4_tag_LC_array! corrected formula for tagged particles : transition from L to C block
+    private                             :: AC_remesh_O4_tag_LC_pter ! corrected formula for tagged particles : transition from L to C block
+
+
+    !===== Interface =====
+    ! -- Order 2: array of real or of pointer --
+    interface AC_remesh_lambda2corrected
+        module procedure AC_remesh_lambda2corrected_pter, AC_remesh_lambda2corrected_array
+    end interface AC_remesh_lambda2corrected
+
+    interface AC_remesh_O2
+        module procedure AC_remesh_O2_pter, AC_remesh_O2_array
+    end interface AC_remesh_O2
+
+    interface AC_remesh_tag_CL
+        module procedure AC_remesh_tag_CL_pter, AC_remesh_tag_CL_array
+    end interface AC_remesh_tag_CL
+
+    interface AC_remesh_tag_LC
+        module procedure AC_remesh_tag_LC_pter, AC_remesh_tag_LC_array
+    end interface AC_remesh_tag_LC
+
+    ! -- Order 4: array of real or of pointer --
+    interface AC_remesh_lambda4corrected
+        module procedure AC_remesh_lambda4corrected_pter, AC_remesh_lambda4corrected_array
+    end interface AC_remesh_lambda4corrected
+
+    interface AC_remesh_O4_left
+        module procedure AC_remesh_O4_left_pter, AC_remesh_O4_left_array
+    end interface AC_remesh_O4_left
+
+    interface AC_remesh_O4_center
+        module procedure AC_remesh_O4_center_pter, AC_remesh_O4_center_array
+    end interface AC_remesh_O4_center
+
+    interface AC_remesh_O4_tag_CL
+        module procedure AC_remesh_O4_tag_CL_pter, AC_remesh_O4_tag_CL_array
+    end interface AC_remesh_O4_tag_CL
+
+    interface AC_remesh_O4_tag_LC
+        module procedure AC_remesh_O4_tag_LC_pter, AC_remesh_O4_tag_LC_array
+    end interface AC_remesh_O4_tag_LC
+
+    ! -- Order 2 with limitator: array of real or of pointer --
+    !interface AC_remesh_lambda2corrected
+    !    module procedure AC_remesh_lambda2corrected_pter, AC_remesh_lambda2corrected_array
+    !end interface AC_remesh_lambda2corrected
+
+    interface AC_remesh_limitO2
+        module procedure AC_remesh_limitO2_pter, AC_remesh_limitO2_array
+    end interface AC_remesh_limitO2
+
+    interface AC_remesh_limitO2_tag_CL
+        module procedure AC_remesh_limitO2_tag_CL_pter, AC_remesh_limitO2_tag_CL_array
+    end interface AC_remesh_limitO2_tag_CL
+
+    interface AC_remesh_limitO2_tag_LC
+        module procedure AC_remesh_limitO2_tag_LC_pter, AC_remesh_limitO2_tag_LC_array
+    end interface AC_remesh_limitO2_tag_LC
+    ! ===== Abstract procedure =====
+
+    ! --- Abstract profile of subroutine used to remesh a line of particles ---
+    ! Variant: the buffer is an array of pointer (and not a pointer to an array)
+    abstract interface
+        subroutine AC_remesh_line_pter(direction, p_pos_adim, scal1D, bl_type, bl_tag, ind_min, buffer)
+            use structure_tools
+            use advec_variables
+
+            implicit none
+
+            ! Input/Output
+            integer, intent(in)                             :: direction
+            real(WP), dimension(:), intent(in)              :: p_pos_adim
+            real(WP), dimension(:), intent(in)              :: scal1D
+            logical, dimension(:), intent(in)               :: bl_type
+            logical, dimension(:), intent(in)               :: bl_tag
+            integer, intent(in)                             :: ind_min
+            type(real_pter), dimension(:), intent(inout)    :: buffer
+        end subroutine AC_remesh_line_pter
+    end interface
+
+contains
+
+
+! ###################################################################
+! ############                                            ###########
+! ############     Pointer to the right remesh formula    ###########
+! ############                                            ###########
+! ###################################################################
+
+subroutine AC_remesh_get_lambda(pointer)
+
+    use advec_variables         ! solver context
+
+    procedure(AC_remesh_line_pter), pointer, intent(out)    :: pointer ! subroutine which remeshes a line of particles with the right remeshing formula
+
+    select case(trim(type_solv))
+    case ('p_O4')
+        pointer  => AC_remesh_lambda4corrected_pter
+    case default
+        pointer  => AC_remesh_lambda2corrected_pter
+    end select
+
+end subroutine AC_remesh_get_lambda
+
+
+subroutine AC_remesh_init_lambda()
+
+    use advec_variables         ! solver context
+
+    select case(trim(type_solv))
+    case ('p_O4')
+        AC_remesh_lambda_array => AC_remesh_lambda4corrected_array
+        AC_remesh_lambda_pter  => AC_remesh_lambda4corrected_pter
+    case default
+        AC_remesh_lambda_array => AC_remesh_lambda2corrected_array
+        AC_remesh_lambda_pter  => AC_remesh_lambda2corrected_pter
+    end select
+
+end subroutine AC_remesh_init_lambda
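+
+! A minimal usage sketch (hypothetical caller, shown as a comment; the
+! variable names below are assumptions, not part of this module):
+!
+!   procedure(AC_remesh_line_pter), pointer :: remesh_line => null()
+!   call AC_remesh_get_lambda(remesh_line)  ! picks O2 or O4 from type_solv
+!   call remesh_line(direction, p_pos_adim, scal1D, bl_type, bl_tag, &
+!                  & ind_min, send_buffer)
+!
+! Alternatively, call AC_remesh_init_lambda() once and then use the module
+! pointers AC_remesh_lambda_array / AC_remesh_lambda_pter directly.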
+
+
+! ###################################################################
+! ############                                            ###########
+! ############     Wrapper to remesh a complete line      ###########
+! ############                                            ###########
+! ###################################################################
+
+!> Remesh a particle line with the corrected lambda 2 formula - remeshing is done into
+!! an array of pointers to reals
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        scal1D      = scalar field to advect
+!!    @param[in]        bl_type     = false (resp. true) if the block is left (resp. centered)
+!!    @param[in]        bl_tag      = contains information about the block (is it tagged?)
+!!    @param[in]        ind_min     = minimal index of the send buffer
+!!    @param[in, out]   send_buffer = array of pointers to the buffer used to remesh the scalar before sending it to the right subdomain
+!! @details
+!!     Use the corrected lambda 2 remeshing formula.
+!! This remeshing formula depends on the particle type :
+!!     1 - Is the particle tagged ?
+!!     2 - Does it belong to a centered or a left block ?
+!! Observe that tagged particles go by groups of two : if the particle at a
+!! block end is tagged, the first one of the following block is tagged too.
+!! The following algorithm is written for blocks of minimal size.
+!! @author = Jean-Baptiste Lagaert, LEGI/LJK
+subroutine AC_remesh_lambda2corrected_pter(direction, p_pos_adim, scal1D, bl_type, bl_tag, ind_min, send_buffer)
+
+    use cart_topology   ! Description of mesh and of mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    integer, intent(in)                                         :: direction
+    real(WP), dimension(:), intent(in)                          :: p_pos_adim
+    real(WP), dimension(:), intent(in)                          :: scal1D
+    logical, dimension(:), intent(in)                           :: bl_type
+    logical, dimension(:), intent(in)                           :: bl_tag
+    integer, intent(in)                                         :: ind_min
+    type(real_pter), dimension(:), intent(inout)                :: send_buffer
+    ! Other local variables
+    integer                                     :: bl_ind       ! index of the current "block end"
+    integer                                     :: p_ind        ! index of the current particle
+    real(WP), dimension(mesh_sc%N_proc(direction))      :: pos_translat ! translation of p_pos_adim, as array indices now start from 1 rather than from ind_min
+
+    pos_translat = p_pos_adim - ind_min + 1
+
+    do p_ind = 1, mesh_sc%N_proc(direction), bl_size
+        bl_ind = p_ind/bl_size + 1
+        if (bl_tag(bl_ind)) then
+            ! Tag case
+                ! XXX Debug : activate only for debugging purposes
+                !if (bl_type(ind).neqv. (.not. bl_type(ind+1))) then
+                !    write(*,'(a,x,3(L1,x),a,3(i0,a))'), 'error on remeshing particles: (tag,type(i), type(i+1)) =', &
+                !    & bl_tag(ind), bl_type(ind), bl_type(ind+1), ' and type must be different. Mesh point = (',i, ', ', j,', ',k,')'
+                !    write(*,'(a,x,i0)'),  'block parameters: ind =', bl_ind
+                !    stop
+                !end if
+                ! XXX Debug - end
+            if (bl_type(bl_ind)) then
+                ! tagged, the first particle belong to a centered block and the last to left block.
+                call AC_remesh_tag_CL(pos_translat(p_ind), scal1D(p_ind), pos_translat(p_ind+1), scal1D(p_ind+1), send_buffer)
+            else
+                ! tagged, the first particle belong to a left block and the last to centered block.
+                call AC_remesh_tag_LC(pos_translat(p_ind), scal1D(p_ind), pos_translat(p_ind+1), scal1D(p_ind+1), send_buffer)
+            end if
+        else
+            ! First particle
+            call AC_remesh_O2(pos_translat(p_ind),scal1D(p_ind), bl_type(bl_ind), send_buffer)
+            ! Second particle is remeshed with left formula
+            call AC_remesh_O2(pos_translat(p_ind+1),scal1D(p_ind+1), bl_type(bl_ind+1), send_buffer)
+        end if
+    end do
+
+end subroutine AC_remesh_lambda2corrected_pter
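+
+! Worked example (illustration, numbers are arbitrary): the translation above
+! shifts positions into the local 1-based buffer frame. A particle at
+! dimensionless position 17.4 on a line whose send buffer starts at
+! ind_min = 12 is remeshed around pos_translat = 17.4 - 12 + 1 = 6.4,
+! i.e. between buffer entries 6 and 7.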
+
+
+!> Remesh a particle line with the corrected lambda 2 formula - remeshing is done into
+!! a real array - no-communication variant (the buffer does not have the same size)
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        scal1D      = scalar field to advect
+!!    @param[in]        bl_type     = false (resp. true) if the block is left (resp. centered)
+!!    @param[in]        bl_tag      = contains information about the block (is it tagged?)
+!!    @param[in, out]   remesh_buffer= buffer used to remesh the scalar
+!! @details
+!!     Use the corrected lambda 2 remeshing formula.
+!! This remeshing formula depends on the particle type :
+!!     1 - Is the particle tagged ?
+!!     2 - Does it belong to a centered or a left block ?
+!! Observe that tagged particles go by groups of two : if the particle at a
+!! block end is tagged, the first one of the following block is tagged too.
+!! The following algorithm is written for blocks of minimal size.
+!! @author = Jean-Baptiste Lagaert, LEGI/LJK
+subroutine AC_remesh_lambda2corrected_array(direction, p_pos_adim, scal1D, bl_type, bl_tag, remesh_buffer)
+
+    use cart_topology   ! Description of mesh and of mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    integer, intent(in)                                 :: direction
+    real(WP), dimension(:), intent(in)                  :: p_pos_adim
+    real(WP), dimension(mesh_sc%N_proc(direction)), intent(in)  :: scal1D
+    logical, dimension(:), intent(in)                   :: bl_type
+    logical, dimension(:), intent(in)                   :: bl_tag
+    real(WP), dimension(:), intent(inout)               :: remesh_buffer
+    ! Other local variables
+    integer     :: bl_ind                               ! index of the current "block end"
+    integer     :: p_ind                                ! index of the current particle
+
+    do p_ind = 1, mesh_sc%N_proc(direction), bl_size
+        bl_ind = p_ind/bl_size + 1
+        if (bl_tag(bl_ind)) then
+            ! Tag case
+                ! XXX Debug : activate only for debugging purposes
+                !if (bl_type(ind).neqv. (.not. bl_type(ind+1))) then
+                !    write(*,'(a,x,3(L1,x),a,3(i0,a))'), 'error on remeshing particles: (tag,type(i), type(i+1)) =', &
+                !    & bl_tag(ind), bl_type(ind), bl_type(ind+1), ' and type must be different. Mesh point = (',i, ', ', j,', ',k,')'
+                !    write(*,'(a,x,i0)'),  'block parameters: ind =', bl_ind
+                !    stop
+                !end if
+                ! XXX Debug - end
+            if (bl_type(bl_ind)) then
+                ! tagged, the first particle belong to a centered block and the last to left block.
+                call AC_remesh_tag_CL(direction, p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), remesh_buffer)
+            else
+                ! tagged, the first particle belong to a left block and the last to centered block.
+                call AC_remesh_tag_LC(direction, p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), remesh_buffer)
+            end if
+        else
+            ! First particle
+            call AC_remesh_O2(direction, p_pos_adim(p_ind),scal1D(p_ind), bl_type(bl_ind), remesh_buffer)
+            ! Second particle is remeshed with left formula
+            call AC_remesh_O2(direction, p_pos_adim(p_ind+1),scal1D(p_ind+1), bl_type(bl_ind+1), remesh_buffer)
+        end if
+    end do
+
+end subroutine AC_remesh_lambda2corrected_array
+
+
+!> Remesh a particle line with the corrected lambda 4 formula - array version
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        scal1D      = scalar field to advect
+!!    @param[in]        bl_type     = false (resp. true) if the block is left (resp. centered)
+!!    @param[in]        bl_tag      = contains information about the block (is it tagged?)
+!!    @param[in, out]   remesh_buffer = buffer used to remesh the scalar before sending it to the right subdomain
+!! @details
+!!     Use the corrected lambda 4 remeshing formula.
+!! This remeshing formula depends on the particle type :
+!!     1 - Is the particle tagged ?
+!!     2 - Does it belong to a centered or a left block ?
+!! Observe that tagged particles go by groups of two : if the particle at a
+!! block end is tagged, the first one of the following block is tagged too.
+!! The following algorithm is written for blocks of minimal size.
+!! @author = Jean-Baptiste Lagaert, LEGI/LJK
+subroutine AC_remesh_lambda4corrected_array(direction, p_pos_adim, scal1D, bl_type, bl_tag, remesh_buffer)
+
+    use cart_topology   ! Description of mesh and of mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    integer, intent(in)                                 :: direction
+    real(WP), dimension(:), intent(in)                  :: p_pos_adim
+    real(WP), dimension(mesh_sc%N_proc(direction)), intent(in)  :: scal1D
+    logical, dimension(:), intent(in)                   :: bl_type
+    logical, dimension(:), intent(in)                   :: bl_tag
+    real(WP), dimension(:), intent(inout)               :: remesh_buffer
+    ! Other local variables
+    integer     :: bl_ind                               ! index of the current "block end"
+    integer     :: p_ind                                ! index of the current particle
+
+    do p_ind = 1, mesh_sc%N_proc(direction), bl_size
+        bl_ind = p_ind/bl_size + 1
+        if (bl_tag(bl_ind)) then
+            ! Tagged case
+            if (bl_type(bl_ind)) then
+                ! tagged, the first particle belong to a centered block and the last to left block.
+                call AC_remesh_O4_tag_CL(direction, p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), &
+                        & p_pos_adim(p_ind+2), scal1D(p_ind+2), p_pos_adim(p_ind+3), scal1D(p_ind+3), remesh_buffer)
+            else
+                ! tagged, the first particle belong to a left block and the last to centered block.
+                call AC_remesh_O4_tag_LC(direction, p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), scal1D(p_ind+1), &
+                        & p_pos_adim(p_ind+2), scal1D(p_ind+2), p_pos_adim(p_ind+3), scal1D(p_ind+3), remesh_buffer)
+            end if
+        else
+            ! No tag
+            if (bl_type(bl_ind)) then
+                call AC_remesh_O4_center(direction, p_pos_adim(p_ind),scal1D(p_ind), remesh_buffer)
+                call AC_remesh_O4_center(direction, p_pos_adim(p_ind+1),scal1D(p_ind+1), remesh_buffer)
+            else
+                call AC_remesh_O4_left(direction, p_pos_adim(p_ind),scal1D(p_ind), remesh_buffer)
+                call AC_remesh_O4_left(direction, p_pos_adim(p_ind+1),scal1D(p_ind+1), remesh_buffer)
+            end if
+            if (bl_type(bl_ind+1)) then
+                call AC_remesh_O4_center(direction, p_pos_adim(p_ind+2),scal1D(p_ind+2), remesh_buffer)
+                call AC_remesh_O4_center(direction, p_pos_adim(p_ind+3),scal1D(p_ind+3), remesh_buffer)
+            else
+                call AC_remesh_O4_left(direction, p_pos_adim(p_ind+2),scal1D(p_ind+2), remesh_buffer)
+                call AC_remesh_O4_left(direction, p_pos_adim(p_ind+3),scal1D(p_ind+3), remesh_buffer)
+            end if
+        end if
+    end do
+
+end subroutine AC_remesh_lambda4corrected_array
+
+
+!> Remesh a particle line with the corrected lambda 4 formula - array of pointers version
+!!    @param[in]        direction       = current direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        p_pos_adim      = dimensionless particle positions
+!!    @param[in]        scal1D          = scalar field to advect
+!!    @param[in]        bl_type         = false (resp. true) if the block is left (resp. centered)
+!!    @param[in]        bl_tag          = contains information about the block (is it tagged?)
+!!    @param[in]        ind_min         = minimal index of the send buffer
+!!    @param[in, out]   remesh_buffer   = array of pointers to the buffer used to locally remesh the scalar
+!! @details
+!!     Use the corrected lambda 4 remeshing formula.
+!! This remeshing formula depends on the particle type :
+!!     1 - Is the particle tagged ?
+!!     2 - Does it belong to a centered or a left block ?
+!! Observe that tagged particles go by groups of two : if the particle at a
+!! block end is tagged, the first one of the following block is tagged too.
+!! The following algorithm is written for blocks of minimal size.
+!! @author = Jean-Baptiste Lagaert, LEGI/LJK
+subroutine AC_remesh_lambda4corrected_pter(direction, p_pos_adim, scal1D, bl_type, bl_tag, ind_min, remesh_buffer)
+
+    use cart_topology   ! Description of mesh and of mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    integer, intent(in)                                 :: direction
+    real(WP), dimension(:), intent(in)                  :: p_pos_adim
+    real(WP), dimension(:), intent(in)                  :: scal1D
+    logical, dimension(:), intent(in)                   :: bl_type
+    logical, dimension(:), intent(in)                   :: bl_tag
+    integer, intent(in)                                 :: ind_min
+    type(real_pter), dimension(:), intent(inout)        :: remesh_buffer
+    ! Other local variables
+    integer     :: bl_ind                               ! index of the current "block end"
+    integer     :: p_ind                                ! index of the current particle
+    real(WP), dimension(mesh_sc%N_proc(direction))      :: pos_translat ! translation of p_pos_adim, as array indices now start from 1 rather than from ind_min
+
+    pos_translat = p_pos_adim - ind_min + 1
+
+    do p_ind = 1, mesh_sc%N_proc(direction), bl_size
+        bl_ind = p_ind/bl_size + 1
+        if (bl_tag(bl_ind)) then
+            ! Tagged case
+            if (bl_type(bl_ind)) then
+                ! tagged, the first particle belong to a centered block and the last to left block.
+                call AC_remesh_O4_tag_CL(pos_translat(p_ind), scal1D(p_ind), pos_translat(p_ind+1), scal1D(p_ind+1), &
+                        & pos_translat(p_ind+2), scal1D(p_ind+2), pos_translat(p_ind+3), scal1D(p_ind+3), remesh_buffer)
+            else
+                ! tagged, the first particle belong to a left block and the last to centered block.
+                call AC_remesh_O4_tag_LC(pos_translat(p_ind), scal1D(p_ind), pos_translat(p_ind+1), scal1D(p_ind+1), &
+                        & pos_translat(p_ind+2), scal1D(p_ind+2), pos_translat(p_ind+3), scal1D(p_ind+3), remesh_buffer)
+            end if
+        else
+            ! No tag
+            if (bl_type(bl_ind)) then
+                call AC_remesh_O4_center(pos_translat(p_ind),scal1D(p_ind), remesh_buffer)
+                call AC_remesh_O4_center(pos_translat(p_ind+1),scal1D(p_ind+1), remesh_buffer)
+            else
+                call AC_remesh_O4_left(pos_translat(p_ind),scal1D(p_ind), remesh_buffer)
+                call AC_remesh_O4_left(pos_translat(p_ind+1),scal1D(p_ind+1), remesh_buffer)
+            end if
+            if (bl_type(bl_ind+1)) then
+                call AC_remesh_O4_center(pos_translat(p_ind+2),scal1D(p_ind+2), remesh_buffer)
+                call AC_remesh_O4_center(pos_translat(p_ind+3),scal1D(p_ind+3), remesh_buffer)
+            else
+                call AC_remesh_O4_left(pos_translat(p_ind+2),scal1D(p_ind+2), remesh_buffer)
+                call AC_remesh_O4_left(pos_translat(p_ind+3),scal1D(p_ind+3), remesh_buffer)
+            end if
+        end if
+    end do
+
+end subroutine AC_remesh_lambda4corrected_pter
+
+
+!> Remesh a particle line with the corrected and limited lambda 2 formula - remeshing
+!! is done into an array of pointers to reals
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        scal1D      = scalar field to advect
+!!    @param[in]        bl_type     = false (resp. true) if the block is left (resp. centered)
+!!    @param[in]        bl_tag      = contains information about the block (is it tagged?)
+!!    @param[in]        limit       = limitator function values associated to the right and left scalar variations
+!!    @param[in]        ind_min     = minimal index of the send buffer
+!!    @param[in, out]   send_buffer = array of pointers to the buffer used to remesh the scalar before sending it to the right subdomain
+!! @details
+!!     Use the corrected and limited lambda 2 remeshing formula.
+!! This remeshing formula depends on the particle type :
+!!     1 - Is the particle tagged ?
+!!     2 - Does it belong to a centered or a left block ?
+!! Observe that tagged particles go by groups of two : if the particle at a
+!! block end is tagged, the first one of the following block is tagged too.
+!! The following algorithm is written for blocks of minimal size.
+!!    Note that it is not the values of the limitator function which are given
+!! as arguments, but these values divided by 8. As the limitator function
+!! always appears divided by 8 in the remeshing polynomials, performing this
+!! division while computing the limitator function improves performance.
+!! @author = Jean-Baptiste Lagaert, LEGI/LJK
+subroutine AC_remesh_lambda2limited_pter(direction, p_pos_adim, scal1D, bl_type, bl_tag, ind_min, limit, send_buffer)
+
+    use cart_topology   ! Description of mesh and of mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    integer, intent(in)                                         :: direction
+    real(WP), dimension(:), intent(in)                          :: p_pos_adim
+    real(WP), dimension(:), intent(in)                          :: scal1D
+    real(WP), dimension(:), intent(in)                          :: limit
+    logical, dimension(:), intent(in)                           :: bl_type
+    logical, dimension(:), intent(in)                           :: bl_tag
+    integer, intent(in)                                         :: ind_min
+    type(real_pter), dimension(:), intent(inout)                :: send_buffer
+    ! Other local variables
+    integer                                     :: bl_ind       ! index of the current "block end"
+    integer                                     :: p_ind        ! index of the current particle
+    real(WP), dimension(mesh_sc%N_proc(direction))      :: pos_translat ! translation of p_pos_adim, as array indices now start from 1 rather than from ind_min
+
+    pos_translat = p_pos_adim - ind_min + 1
+
+    do p_ind = 1, mesh_sc%N_proc(direction), bl_size
+        bl_ind = p_ind/bl_size + 1
+        if (bl_tag(bl_ind)) then
+            ! Tag case
+                ! XXX Debug : activate only for debugging purposes
+                !if (bl_type(ind).neqv. (.not. bl_type(ind+1))) then
+                !    write(*,'(a,x,3(L1,x),a,3(i0,a))'), 'error on remeshing particles: (tag,type(i), type(i+1)) =', &
+                !    & bl_tag(ind), bl_type(ind), bl_type(ind+1), ' and type must be different. Mesh point = (',i, ', ', j,', ',k,')'
+                !    write(*,'(a,x,i0)'),  'block parameters: ind =', bl_ind
+                !    stop
+                !end if
+                ! XXX Debug - end
+            if (bl_type(bl_ind)) then
+                ! tagged, the first particle belong to a centered block and the last to left block.
+                call AC_remesh_limitO2_tag_CL(pos_translat(p_ind), scal1D(p_ind), pos_translat(p_ind+1), &
+                        & scal1D(p_ind+1), limit(p_ind:p_ind+2), send_buffer)
+            else
+                ! tagged, the first particle belong to a left block and the last to centered block.
+                call AC_remesh_limitO2_tag_LC(pos_translat(p_ind), scal1D(p_ind), pos_translat(p_ind+1), &
+                        & scal1D(p_ind+1), limit(p_ind:p_ind+2), send_buffer)
+            end if
+        else
+            ! First particle
+            call AC_remesh_limitO2(pos_translat(p_ind),scal1D(p_ind), bl_type(bl_ind), limit(p_ind:p_ind+1), send_buffer)
+            ! Second particle is remeshed with left formula
+            call AC_remesh_limitO2(pos_translat(p_ind+1),scal1D(p_ind+1), bl_type(bl_ind+1), limit(p_ind+1:p_ind+2), send_buffer)
+        end if
+    end do
+
+end subroutine AC_remesh_lambda2limited_pter
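+
+! Note (illustration): since the remeshing polynomials expect the limitator
+! values already divided by 8 (see the header above), a caller that computes
+! a classical limitator value phi (a placeholder name) must store phi/8. in
+! the limit array; for instance phi = 1. is passed as 0.125.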
+
+
+!> Remesh a particle line with the corrected and limited lambda 2 formula - remeshing
+!! is done into a real array - no-communication variant (the buffer does not have the same size)
+!!    @param[in]        direction   = current direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        p_pos_adim  = dimensionless particle positions
+!!    @param[in]        scal1D      = scalar field to advect
+!!    @param[in]        bl_type     = false (resp. true) if the block is left (resp. centered)
+!!    @param[in]        bl_tag      = contains information about the block (is it tagged?)
+!!    @param[in]        limit       = limitator function values associated to the right and left scalar variations
+!!    @param[in, out]   remesh_buffer= buffer used to remesh the scalar
+!! @details
+!!     Use the corrected and limited lambda 2 remeshing formula.
+!! This remeshing formula depends on the particle type :
+!!     1 - Is the particle tagged ?
+!!     2 - Does it belong to a centered or a left block ?
+!! Observe that tagged particles go by groups of two : if the particle at a
+!! block end is tagged, the first one of the following block is tagged too.
+!! The following algorithm is written for blocks of minimal size.
+!!    Note that it is not the values of the limitator function which are given
+!! as arguments, but these values divided by 8. As the limitator function
+!! always appears divided by 8 in the remeshing polynomials, performing this
+!! division while computing the limitator function improves performance.
+!! @author = Jean-Baptiste Lagaert, LEGI/LJK
+subroutine AC_remesh_lambda2limited_array(direction, p_pos_adim, scal1D, bl_type, bl_tag, limit, remesh_buffer)
+
+    use cart_topology   ! Description of mesh and of mpi topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    integer, intent(in)                                 :: direction
+    real(WP), dimension(:), intent(in)                  :: p_pos_adim
+    real(WP), dimension(:), intent(in)                  :: scal1D
+    real(WP), dimension(:), intent(in)                  :: limit
+    logical, dimension(:), intent(in)                   :: bl_type
+    logical, dimension(:), intent(in)                   :: bl_tag
+    real(WP), dimension(:), intent(inout)               :: remesh_buffer
+    ! Other local variables
+    integer     :: bl_ind                               ! index of the current "block end"
+    integer     :: p_ind                                ! index of the current particle
+
+    do p_ind = 1, mesh_sc%N_proc(direction), bl_size
+        bl_ind = p_ind/bl_size + 1
+        if (bl_tag(bl_ind)) then
+            ! Tag case
+                ! XXX Debug : activate only for debugging purposes
+                !if (bl_type(ind).neqv. (.not. bl_type(ind+1))) then
+                !    write(*,'(a,x,3(L1,x),a,3(i0,a))'), 'error on remeshing particles: (tag,type(i), type(i+1)) =', &
+                !    & bl_tag(ind), bl_type(ind), bl_type(ind+1), ' and type must be different. Mesh point = (',i, ', ', j,', ',k,')'
+                !    write(*,'(a,x,i0)'),  'block parameters: ind =', bl_ind
+                !    stop
+                !end if
+                ! XXX Debug - end
+            if (bl_type(bl_ind)) then
+                ! tagged, the first particle belong to a centered block and the last to left block.
+                call AC_remesh_limitO2_tag_CL(direction, p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), &
+                        & scal1D(p_ind+1), limit(p_ind:p_ind+2), remesh_buffer)
+            else
+                ! tagged, the first particle belong to a left block and the last to centered block.
+                call AC_remesh_limitO2_tag_LC(direction, p_pos_adim(p_ind), scal1D(p_ind), p_pos_adim(p_ind+1), &
+                        & scal1D(p_ind+1), limit(p_ind:p_ind+2), remesh_buffer)
+            end if
+        else
+            ! First particle
+            call AC_remesh_limitO2(direction, p_pos_adim(p_ind),scal1D(p_ind), bl_type(bl_ind), limit(p_ind:p_ind+1), remesh_buffer)
+            ! Second particle is remeshed with left formula
+            call AC_remesh_limitO2(direction, p_pos_adim(p_ind+1),scal1D(p_ind+1), bl_type(bl_ind+1), limit(p_ind+1:p_ind+2), remesh_buffer)
+        end if
+    end do
+
+end subroutine AC_remesh_lambda2limited_array
+
+
+! #########################################################################
+! ############                                                  ###########
+! ############   Interpolation polynomials used for remeshing   ###########
+! ############                                                  ###########
+! #########################################################################
+
+! ============================================================
+! ============     Lambda 2 corrected formula     ============
+! ============================================================
+
+!> (center or left) lambda remeshing formula of order 2 - version for a classical array
+!!      @param[in]       dir     = current direction
+!!      @param[in]       pos_adim= dimensionless particle position
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in]       bl_type = false (resp. true) if the block is left (resp. centered)
+!!      @param[in,out]   buffer  = temporary remeshed scalar field
+subroutine AC_remesh_O2_array(dir, pos_adim, sca, bl_type, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    real(WP), intent(in)                    :: pos_adim, sca
+    integer, intent(in)                     :: dir
+    logical, intent(in)                     :: bl_type
+    real(WP), dimension(:), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: j0, j1                   ! indices of the nearest mesh points
+    real(WP)    :: bM, b0, bP               ! interpolation weights for the particle
+    real(WP)    :: y0                       ! dimensionless distance to mesh points
+
+    ! Mesh point used in remeshing formula
+    if (bl_type) then
+        ! Center remeshing
+        j0 = nint(pos_adim)
+    else
+        ! Left remeshing
+        j0 = floor(pos_adim)
+    end if
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! Interpolation weights
+    bM=0.5*y0*(y0-1.)
+    b0=1.-y0**2
+    !bP=0.5*y0*(y0+1.)
+    bP=1. - (b0+bM)
+
+    ! remeshing
+    j1 = modulo(j0-2,mesh_sc%N(dir))+1 ! j0-1
+    buffer(j1) = buffer(j1) + bM*sca
+    j1 = modulo(j0-1,mesh_sc%N(dir))+1 ! j0
+    buffer(j1) = buffer(j1) + b0*sca
+    j1 = modulo(j0,mesh_sc%N(dir))+1   ! j0+1
+    buffer(j1) = buffer(j1) + bP*sca
+
+end subroutine AC_remesh_O2_array
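+
+! Worked check (illustration): the three lambda-2 weights form a partition
+! of unity for any y0, since
+!   bM + b0 + bP = 0.5*y0*(y0-1.) + (1.-y0**2) + 0.5*y0*(y0+1.) = 1.
+! Computing bP as 1.-(b0+bM) keeps the weight sum at 1 up to a single
+! rounding, so the remeshed scalar is conserved to round-off. The mapping
+! modulo(j0-2,mesh_sc%N(dir))+1 implements periodic wrapping in 1-based
+! indexing: for j0 = 1 and N = 32 it gives modulo(-1,32)+1 = 32, i.e. the
+! last mesh point.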
+
+
+!> (center or left) lambda remeshing formula of order 2 - version for an array of pointers
+!!      @param[in]       pos_adim= dimensionless particle position
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in]       bl_type = false (resp. true) if the block is left (resp. centered)
+!!      @param[in,out]   buffer  = temporary remeshed scalar field
+subroutine AC_remesh_O2_pter(pos_adim, sca, bl_type, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    real(WP), intent(in)                                :: pos_adim, sca
+    logical, intent(in)                                 :: bl_type
+    type(real_pter), dimension(:), intent(inout)        :: buffer
+    ! Other local variables
+    integer     :: j0                       ! index of the nearest mesh point
+    real(WP)    :: bM, b0, bP               ! interpolation weights for the particle
+    real(WP)    :: y0                       ! dimensionless distance to mesh points
+
+    ! Mesh point used in remeshing formula
+    if (bl_type) then
+        ! Center remeshing
+        j0 = nint(pos_adim)
+    else
+        ! Left remeshing
+        j0 = floor(pos_adim)
+    end if
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! Interpolation weights
+    bM=0.5*y0*(y0-1.)
+    b0=1.-y0**2
+    !bP=0.5*y0*(y0+1.)
+    bP=1. - (b0+bM)
+
+    ! remeshing
+    buffer(j0-1)%pter = buffer(j0-1)%pter + bM*sca
+    buffer(j0)%pter   = buffer(j0)%pter   + b0*sca
+    buffer(j0+1)%pter = buffer(j0+1)%pter + bP*sca
+
+end subroutine AC_remesh_O2_pter
+
+
+!> Corrected remeshing formula for the transition from a Centered block to a Left block with a different index (tagged particles)
+!!    @param[in]       dir     = current direction
+!!    @param[in]       pos_adim= dimensionless particle position
+!!    @param[in]       sca     = scalar advected by this particle
+!!    @param[in]       posP_ad = dimensionless position of the second particle
+!!    @param[in]       scaP    = scalar advected by this particle
+!!    @param[in,out]   buffer  = temporary remeshed scalar field
+!! @details
+!!    Remeshing formula devoted to tagged particles.
+!!    The particle group sent as argument is composed of a block end and of the
+!!    beginning of the next block. The first particle belongs to a centered block
+!!    and the last to a left one. The blocks have different indices (tagged
+!!    particles) and we have to use a corrected formula.
+subroutine AC_remesh_tag_CL_array(dir, pos_adim, sca, posP_ad, scaP, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    integer, intent(in)                     :: dir
+    real(WP), intent(in)                    :: pos_adim, sca, posP_ad, scaP
+    real(WP), dimension(:), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: jM, j0, jP               ! indices of the nearest mesh points
+                                            ! (they depend on the block type)
+    integer     :: j0_bis                   ! index of the nearest mesh point for the indP=ind+1 particle
+    real(WP)    :: aM, a0, bP, b0           ! interpolation weights for the particles
+    real(WP)    :: y0, y0_bis               ! dimensionless distances to mesh points
+
+    j0 = nint(pos_adim)
+    !j0 = nint(pos/d_sc(2))
+    j0_bis = floor(posP_ad)
+    !j0_bis = floor(posP/d_sc(2))
+
+    y0 = (pos_adim - real(j0, WP))
+    !y0 = (pos - real(j0, WP)*d_sc(2))/d_sc(2)
+    y0_bis = (posP_ad - real(j0_bis, WP))
+    !y0_bis = (posP - real(j0_bis, WP)*d_sc(2))/d_sc(2)
+
+    aM=0.5*y0*(y0-1)
+    a0=1.-aM
+    bP=0.5*y0_bis*(y0_bis+1.)
+    b0=1.-bP
+
+    ! Remeshing
+    jM = modulo(j0-2,mesh_sc%N(dir))+1  ! j0-1
+    jP = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
+    j0 = modulo(j0-1,mesh_sc%N(dir))+1  ! j0
+    buffer(jM)=buffer(jM)+aM*sca
+    buffer(j0)=buffer(j0)+a0*sca+b0*scaP
+    buffer(jP)=buffer(jP)+bP*scaP
+
+end subroutine AC_remesh_tag_CL_array
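+
+! Worked check (illustration): each tagged particle keeps a unit weight sum,
+!   aM + a0 = aM + (1.-aM) = 1.   and   b0 + bP = (1.-bP) + bP = 1.
+! so the corrected CL formula redistributes sca and scaP without loss even
+! though both particles contribute to the shared central point j0.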
+
+
+!> Corrected remeshing formula for the transition from a Centered block to a Left block with a different index (tagged particles)
+!!    @param[in]       pos_adim= dimensionless particle position
+!!    @param[in]       sca     = scalar advected by this particle
+!!    @param[in]       posP_ad = dimensionless position of the second particle
+!!    @param[in]       scaP    = scalar advected by this particle
+!!    @param[in,out]   buffer  = temporary remeshed scalar field
+!! @details
+!!    Remeshing formula devoted to tagged particles.
+!!    The particle group sent as argument is composed of a block end and of the
+!!    beginning of the next block. The first particle belongs to a centered block
+!!    and the last to a left one. The blocks have different indices (tagged
+!!    particles) and we have to use a corrected formula.
+subroutine AC_remesh_tag_CL_pter(pos_adim, sca, posP_ad, scaP, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    real(WP), intent(in)                            :: pos_adim, sca, posP_ad, scaP
+    type(real_pter), dimension(:), intent(inout)    :: buffer
+    ! Other local variables
+    integer     :: jM, j0, jP               ! indices of the nearest mesh points
+                                            ! (they depend on the block type)
+    integer     :: j0_bis                   ! index of the nearest mesh point for the indP=ind+1 particle
+    real(WP)    :: aM, a0, bP, b0           ! interpolation weights for the particles
+    real(WP)    :: y0, y0_bis               ! dimensionless distances to mesh points
+
+    j0 = nint(pos_adim)
+    !j0 = nint(pos/d_sc(2))
+    j0_bis = floor(posP_ad)
+    !j0_bis = floor(posP/d_sc(2))
+    jM=j0-1
+    jP=j0+1
+
+    y0 = (pos_adim - real(j0, WP))
+    !y0 = (pos - real(j0, WP)*d_sc(2))/d_sc(2)
+    y0_bis = (posP_ad - real(j0_bis, WP))
+    !y0_bis = (posP - real(j0_bis, WP)*d_sc(2))/d_sc(2)
+
+    aM=0.5*y0*(y0-1)
+    a0=1.-aM
+    bP=0.5*y0_bis*(y0_bis+1.)
+    b0=1.-bP
+
+    ! Remeshing
+    buffer(jM)%pter=buffer(jM)%pter+aM*sca
+    buffer(j0)%pter=buffer(j0)%pter+a0*sca+b0*scaP
+    buffer(jP)%pter=buffer(jP)%pter+bP*scaP
+
+end subroutine AC_remesh_tag_CL_pter
+
+
+!> Corrected remeshing formula for the transition from a left block to a centered block with a different index (tagged particles)
+!!    @param[in]       dir     = current direction
+!!    @param[in]       pos_adim= dimensionless position of the first particle
+!!    @param[in]       sca     = scalar advected by the first particle
+!!    @param[in]       posP_ad = dimensionless position of the second particle
+!!    @param[in]       scaP    = scalar advected by the second particle
+!!    @param[in,out]   buffer  = temporarily remeshed scalar field
+!! @details
+!!    Remeshing formula devoted to tagged particles.
+!!    The particle group passed as argument is composed of the end of a block
+!!    and of the beginning of the next one. The first particle belongs to a
+!!    left block and the last one to a centered block. The blocks have
+!!    different indices (tagged particles), so a corrected formula must be used.
+subroutine AC_remesh_tag_LC_array(dir, pos_adim, sca, posP_ad, scaP, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    integer, intent(in)                     :: dir
+    real(WP), intent(in)                    :: pos_adim, sca, posP_ad, scaP
+    real(WP), dimension(:), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: jM, j0, jP, jP2, jP3             ! indices of the nearest mesh points
+                                                    ! (they depend on the block type)
+    integer     :: j0_bis                           ! index of the nearest mesh point for the indP=ind+1 particle
+    real(WP)    :: aM, a0, aP, aP2, b0, bP, bP2, bP3 ! interpolation weights for the particles
+    real(WP)    :: y0, y0_bis                       ! dimensionless distances to mesh points
+
+
+    ! Indice of mesh point used in order to remesh
+    j0 = floor(pos_adim)
+    !j0 = floor(pos/d_sc(2))
+    j0_bis = nint(posP_ad)
+    !j0_bis = nint(posP/d_sc(2))
+    jM=j0-1
+    jP=j0+1
+    jP2=j0+2
+    jP3=j0+3
+
+    ! Distance to mesh point
+    y0 = (pos_adim - real(j0, WP))
+    !y0 = (pos - real(j0, WP)*d_sc(2))/d_sc(2)
+    y0_bis = (posP_ad - real(j0_bis, WP))
+    !y0_bis = (posP - real(j0_bis, WP)*d_sc(2))/d_sc(2)
+
+    ! Interpolation weight
+    a0=1-y0**2
+    aP=y0
+    !aM=y0*yM/2.
+    aM = 0.5-(a0+aP)/2.
+    aP2=aM
+    bP=-y0_bis
+    bP2=1-y0_bis**2
+    !b0=y0_bis*yP_bis/2.
+    b0 = 0.5-(bP+bP2)/2.
+    bP3=b0
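+
+    ! Note: aM+a0+aP+aP2 = 1 and b0+bP+bP2+bP3 = 1 by construction (aP2 and bP3
+    ! reuse the complement weights aM and b0), so each particle distributes a
+    ! total weight of one and the remeshing conserves the scalar.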
+
+    ! Remeshing
+    jM = modulo(j0-2,mesh_sc%N(dir))+1  ! j0-1
+    jP = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
+    jP2= modulo(j0+1,mesh_sc%N(dir))+1  ! j0+2
+    jP3= modulo(j0+2,mesh_sc%N(dir))+1  ! j0+3
+    j0 = modulo(j0-1,mesh_sc%N(dir))+1  ! j0
+    buffer(jM)= buffer(jM)+aM*sca
+    buffer(j0)= buffer(j0)+a0*sca+b0*scaP
+    buffer(jP)= buffer(jP)+aP*sca+bP*scaP
+    buffer(jP2)=buffer(jP2)+aP2*sca+bP2*scaP
+    buffer(jP3)=buffer(jP3)+bP3*scaP
+
+end subroutine AC_remesh_tag_LC_array
+
+
+!> Corrected remeshing formula for the transition from a left block to a centered block with a different index (tagged particles)
+!!    @param[in]       pos_adim= dimensionless position of the first particle
+!!    @param[in]       sca     = scalar advected by the first particle
+!!    @param[in]       posP_ad = dimensionless position of the second particle
+!!    @param[in]       scaP    = scalar advected by the second particle
+!!    @param[in,out]   buffer  = temporarily remeshed scalar field
+!! @details
+!!    Remeshing formula devoted to tagged particles.
+!!    The particle group passed as argument is composed of the end of a block
+!!    and of the beginning of the next one. The first particle belongs to a
+!!    left block and the last one to a centered block. The blocks have
+!!    different indices (tagged particles), so a corrected formula must be used.
+subroutine AC_remesh_tag_LC_pter(pos_adim, sca, posP_ad, scaP, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    real(WP), intent(in)                            :: pos_adim, sca, posP_ad, scaP
+    type(real_pter), dimension(:), intent(inout)    :: buffer
+    ! Other local variables
+    integer     :: jM, j0, jP, jP2, jP3             ! indices of the nearest mesh points
+                                                    ! (they depend on the block type)
+    integer     :: j0_bis                           ! index of the nearest mesh point for the indP=ind+1 particle
+    real(WP)    :: aM, a0, aP, aP2, b0, bP, bP2, bP3 ! interpolation weights for the particles
+    real(WP)    :: y0, y0_bis                       ! dimensionless distances to mesh points
+
+
+    ! Indice of mesh point used in order to remesh
+    j0 = floor(pos_adim)
+    !j0 = floor(pos/d_sc(2))
+    j0_bis = nint(posP_ad)
+    !j0_bis = nint(posP/d_sc(2))
+    jM=j0-1
+    jP=j0+1
+    jP2=j0+2
+    jP3=j0+3
+
+    ! Distance to mesh point
+    y0 = (pos_adim - real(j0, WP))
+    !y0 = (pos - real(j0, WP)*d_sc(2))/d_sc(2)
+    y0_bis = (posP_ad - real(j0_bis, WP))
+    !y0_bis = (posP - real(j0_bis, WP)*d_sc(2))/d_sc(2)
+
+    ! Interpolation weight
+    a0=1-y0**2
+    aP=y0
+    !aM=y0*yM/2.
+    aM = 0.5-(a0+aP)/2.
+    aP2=aM
+    bP=-y0_bis
+    bP2=1-y0_bis**2
+    !b0=y0_bis*yP_bis/2.
+    b0 = 0.5-(bP+bP2)/2.
+    bP3=b0
+
+    ! Remeshing
+    buffer(jM)%pter= buffer(jM)%pter+aM*sca
+    buffer(j0)%pter= buffer(j0)%pter+a0*sca+b0*scaP
+    buffer(jP)%pter= buffer(jP)%pter+aP*sca+bP*scaP
+    buffer(jP2)%pter=buffer(jP2)%pter+aP2*sca+bP2*scaP
+    buffer(jP3)%pter=buffer(jP3)%pter+bP3*scaP
+
+end subroutine AC_remesh_tag_LC_pter
+
+
+! ========================================================================
+! ============     Lambda 2 corrected and limited formula     ============
+! ========================================================================
+
+!> (Center or left) remeshing formula for lambda 2 corrected and limited - classical array
+!! version
+!!      @param[in]       dir     = current direction
+!!      @param[in]       pos_adim= dimensionless particle position
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in]       bl_type = .false. (resp .true.) if the block is left (resp centered)
+!!      @param[in]       limit   = limitator function values associated to the right and the left scalar variations
+!!      @param[in,out]   buffer  = temporarily remeshed scalar field
+!! @details
+!!    Note that it is not the value of the limitator function which is given as
+!! argument, but this value divided by 8. As the limitator function always
+!! appears divided by 8 in the remeshing polynomials, performing this division
+!! once, when the limitator function is computed, improves performance.
+subroutine AC_remesh_limitO2_array(dir, pos_adim, sca, bl_type, limit, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    real(WP), intent(in)                    :: pos_adim, sca
+    logical, intent(in)                     :: bl_type
+    real(WP), dimension(2), intent(in)      :: limit
+    integer, intent(in)                     :: dir
+    real(WP), dimension(:), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: j0, j1                   ! indices of the nearest mesh points
+    real(WP)    :: bM, b0, bP               ! interpolation weights for the particle
+    real(WP)    :: y0                       ! dimensionless distance to mesh points
+
+    ! Mesh point used in remeshing formula
+    if (bl_type) then
+        ! Center remeshing
+        j0 = nint(pos_adim)
+    else
+        ! Left remeshing
+        j0 = floor(pos_adim)
+    end if
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! Interpolation weights
+    bM=0.5*((y0-0.5)**2) - limit(1)
+    b0=0.75_WP - y0**2 + limit(1) + limit(2)
+    !bP=0.5*((y0+0.5)**2) - limit(2)
+    bP=1. - (b0+bM)
+
+    ! remeshing
+    j1 = modulo(j0-2,mesh_sc%N(dir))+1 ! j0-1
+    buffer(j1) = buffer(j1) + bM*sca
+    j1 = modulo(j0-1,mesh_sc%N(dir))+1 ! j0
+    buffer(j1) = buffer(j1) + b0*sca
+    j1 = modulo(j0,mesh_sc%N(dir))+1   ! j0+1
+    buffer(j1) = buffer(j1) + bP*sca
+
+end subroutine AC_remesh_limitO2_array
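+
+! Sanity check for the limited weights: with the neutral limiter value
+! limit(:) = 1/8 one recovers the classical lambda 2 weights, e.g.
+! bM = 0.5*(y0-0.5)**2 - 1/8 = 0.5*y0*(y0-1), the same expression as the
+! unlimited weight aM used in AC_remesh_tag_CL_array above.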
+
+
+!> (Center or left) remeshing formula for lambda 2 corrected and limited - array of pointers
+!! version
+!!      @param[in]       pos_adim= dimensionless particle position
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in]       bl_type = .false. (resp .true.) if the block is left (resp centered)
+!!      @param[in]       limit   = limitator function values associated to the right and the left scalar variations
+!!      @param[in,out]   buffer  = temporarily remeshed scalar field
+!! @details
+!!    Note that it is not the value of the limitator function which is given as
+!! argument, but this value divided by 8. As the limitator function always
+!! appears divided by 8 in the remeshing polynomials, performing this division
+!! once, when the limitator function is computed, improves performance.
+subroutine AC_remesh_limitO2_pter(pos_adim, sca, bl_type, limit, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    real(WP), intent(in)                            :: pos_adim, sca
+    logical, intent(in)                             :: bl_type
+    real(WP), dimension(2), intent(in)              :: limit
+    type(real_pter), dimension(:), intent(inout)    :: buffer
+
+    ! Other local variables
+    integer     :: j0                       ! index of the nearest mesh point
+    real(WP)    :: bM, b0, bP               ! interpolation weights for the particle
+    real(WP)    :: y0                       ! dimensionless distance to mesh points
+
+    ! Mesh point used in remeshing formula
+    if (bl_type) then
+        ! Center remeshing
+        j0 = nint(pos_adim)
+    else
+        ! Left remeshing
+        j0 = floor(pos_adim)
+    end if
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! Interpolation weights
+    bM=0.5*((y0-0.5)**2) - limit(1)
+    b0=0.75_WP - y0**2 + limit(1) + limit(2)
+    !bP=0.5*((y0+0.5)**2) - limit(2)
+    bP=1. - (b0+bM)
+
+    ! remeshing
+    buffer(j0-1)%pter = buffer(j0-1)%pter + bM*sca
+    buffer(j0)%pter   = buffer(j0)%pter   + b0*sca
+    buffer(j0+1)%pter = buffer(j0+1)%pter + bP*sca
+
+end subroutine AC_remesh_limitO2_pter
+
+
+!> Corrected remeshing formula for the transition from a centered block to a left block with a different index (tagged particles)
+!!    @param[in]       dir     = current direction
+!!    @param[in]       pos_adim= dimensionless position of the first particle
+!!    @param[in]       sca     = scalar advected by the first particle
+!!    @param[in]       posP_ad = dimensionless position of the second particle
+!!    @param[in]       scaP    = scalar advected by the second particle
+!!    @param[in]       limit   = limitator function values associated to the right and the left scalar variations
+!!    @param[in,out]   buffer  = temporarily remeshed scalar field
+!! @details
+!!    Remeshing formula devoted to tagged particles.
+!!    The particle group passed as argument is composed of the end of a block
+!!    and of the beginning of the next one. The first particle belongs to a
+!!    centered block and the last one to a left block. The blocks have
+!!    different indices (tagged particles), so a corrected formula must be used.
+subroutine AC_remesh_limitO2_tag_CL_array(dir, pos_adim, sca, posP_ad, scaP, limit, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    integer, intent(in)                     :: dir
+    real(WP), intent(in)                    :: pos_adim, sca, posP_ad, scaP
+    real(WP), dimension(3), intent(in)      :: limit    ! to remesh particles of indices i, i+1, limitator must be known at i-1/2, i+1/2=(i+1)-1/2 and (i+1)+1/2
+    real(WP), dimension(:), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: jM, j0, jP               ! indices of the nearest mesh points
+                                            ! (they depend on the block type)
+    integer     :: j0_bis                   ! index of the nearest mesh point for the indP=ind+1 particle
+    real(WP)    :: aM, a0, bP, b0           ! interpolation weights for the particles
+    real(WP)    :: y0, y0_bis               ! dimensionless distances to mesh points
+
+    j0 = nint(pos_adim)
+    !j0 = nint(pos/d_sc(2))
+    j0_bis = floor(posP_ad)
+    !j0_bis = floor(posP/d_sc(2))
+
+    y0 = (pos_adim - real(j0, WP))
+    !y0 = (pos - real(j0, WP)*d_sc(2))/d_sc(2)
+    y0_bis = (posP_ad - real(j0_bis, WP))
+    !y0_bis = (posP - real(j0_bis, WP)*d_sc(2))/d_sc(2)
+
+    aM=0.5*((y0-0.5)**2) - limit(1)  ! = (lambda 2 limited) alpha(y0)
+    a0=1.-aM
+    bP=0.5*((y0_bis+0.5)**2) - limit(3)  ! = (lambda 2 limited) gamma(y0_bis)
+    ! note that limit(3) is the limitator function at (i+1)+1/2, with (i+1) the
+    ! index of the particle of weight scaP
+    b0=1.-bP
+
+    ! Remeshing
+    jM = modulo(j0-2,mesh_sc%N(dir))+1  ! j0-1
+    jP = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
+    j0 = modulo(j0-1,mesh_sc%N(dir))+1  ! j0
+    buffer(jM)=buffer(jM)+aM*sca
+    buffer(j0)=buffer(j0)+a0*sca+b0*scaP
+    buffer(jP)=buffer(jP)+bP*scaP
+
+end subroutine AC_remesh_limitO2_tag_CL_array
+
+
+!> Corrected remeshing formula for the transition from a centered block to a left block with a different index (tagged particles)
+!!    @param[in]       pos_adim= dimensionless position of the first particle
+!!    @param[in]       sca     = scalar advected by the first particle
+!!    @param[in]       posP_ad = dimensionless position of the second particle
+!!    @param[in]       scaP    = scalar advected by the second particle
+!!    @param[in]       limit   = limitator function values associated to the right and the left scalar variations
+!!    @param[in,out]   buffer  = temporarily remeshed scalar field
+!! @details
+!!    Remeshing formula devoted to tagged particles.
+!!    The particle group passed as argument is composed of the end of a block
+!!    and of the beginning of the next one. The first particle belongs to a
+!!    centered block and the last one to a left block. The blocks have
+!!    different indices (tagged particles), so a corrected formula must be used.
+subroutine AC_remesh_limitO2_tag_CL_pter(pos_adim, sca, posP_ad, scaP, limit, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    real(WP), intent(in)                            :: pos_adim, sca, posP_ad, scaP
+    real(WP), dimension(3), intent(in)              :: limit    ! to remesh particles of indices i, i+1, limitator must be known at i-1/2, i+1/2=(i+1)-1/2 and (i+1)+1/2
+    type(real_pter), dimension(:), intent(inout)    :: buffer
+    ! Other local variables
+    integer     :: jM, j0, jP               ! indices of the nearest mesh points
+                                            ! (they depend on the block type)
+    integer     :: j0_bis                   ! index of the nearest mesh point for the indP=ind+1 particle
+    real(WP)    :: aM, a0, bP, b0           ! interpolation weights for the particles
+    real(WP)    :: y0, y0_bis               ! dimensionless distances to mesh points
+
+    j0 = nint(pos_adim)
+    !j0 = nint(pos/d_sc(2))
+    j0_bis = floor(posP_ad)
+    !j0_bis = floor(posP/d_sc(2))
+    jM=j0-1
+    jP=j0+1
+
+    y0 = (pos_adim - real(j0, WP))
+    !y0 = (pos - real(j0, WP)*d_sc(2))/d_sc(2)
+    y0_bis = (posP_ad - real(j0_bis, WP))
+    !y0_bis = (posP - real(j0_bis, WP)*d_sc(2))/d_sc(2)
+
+    ! Same weights as in the array version (AC_remesh_limitO2_tag_CL_array)
+    aM=0.5*((y0-0.5)**2) - limit(1)  ! = (lambda 2 limited) alpha(y0)
+    a0=1.-aM
+    bP=0.5*((y0_bis+0.5)**2) - limit(3)  ! = (lambda 2 limited) gamma(y0_bis)
+    ! note that limit(3) is the limitator function at (i+1)+1/2, with (i+1) the
+    ! index of the particle of weight scaP
+    b0=1.-bP
+
+    ! Remeshing
+    buffer(jM)%pter=buffer(jM)%pter+aM*sca
+    buffer(j0)%pter=buffer(j0)%pter+a0*sca+b0*scaP
+    buffer(jP)%pter=buffer(jP)%pter+bP*scaP
+
+end subroutine AC_remesh_limitO2_tag_CL_pter
+
+
+!> Corrected remeshing formula for the transition from a left block to a centered block with a different index (tagged particles)
+!!    @param[in]       dir     = current direction
+!!    @param[in]       pos_adim= dimensionless position of the first particle
+!!    @param[in]       sca     = scalar advected by the first particle
+!!    @param[in]       posP_ad = dimensionless position of the second particle
+!!    @param[in]       scaP    = scalar advected by the second particle
+!!    @param[in]       limit   = limitator function values associated to the right and the left scalar variations
+!!    @param[in,out]   buffer  = temporarily remeshed scalar field
+!! @details
+!!    Remeshing formula devoted to tagged particles.
+!!    The particle group passed as argument is composed of the end of a block
+!!    and of the beginning of the next one. The first particle belongs to a
+!!    left block and the last one to a centered block. The blocks have
+!!    different indices (tagged particles), so a corrected formula must be used.
+subroutine AC_remesh_limitO2_tag_LC_array(dir, pos_adim, sca, posP_ad, scaP, limit, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    integer, intent(in)                     :: dir
+    real(WP), intent(in)                    :: pos_adim, sca, posP_ad, scaP
+    real(WP), dimension(3), intent(in)      :: limit    ! to remesh particles of indices i, i+1, limitator must be known at i-1/2, i+1/2=(i+1)-1/2 and (i+1)+1/2
+    real(WP), dimension(:), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: jM, j0, jP, jP2, jP3             ! indices of the nearest mesh points
+                                                    ! (they depend on the block type)
+    integer     :: j0_bis                           ! index of the nearest mesh point for the indP=ind+1 particle
+    real(WP)    :: aM, a0, aP, aP2, b0, bP, bP2, bP3 ! interpolation weights for the particles
+    real(WP)    :: y0, y0_bis                       ! dimensionless distances to mesh points
+
+
+    ! Indice of mesh point used in order to remesh
+    j0 = floor(pos_adim)
+    !j0 = floor(pos/d_sc(2))
+    j0_bis = nint(posP_ad)
+    !j0_bis = nint(posP/d_sc(2))
+    jM=j0-1
+    jP=j0+1
+    jP2=j0+2
+    jP3=j0+3
+
+    ! Distance to mesh point
+    y0 = (pos_adim - real(j0, WP))
+    !y0 = (pos - real(j0, WP)*d_sc(2))/d_sc(2)
+    y0_bis = (posP_ad - real(j0_bis, WP))
+    !y0_bis = (posP - real(j0_bis, WP)*d_sc(2))/d_sc(2)
+
+    ! Interpolation weight
+    ! Use limit(1) and limit(2) to remesh particle i (they are limitator at i-1/2, i+1/2)
+    aM = 0.5*((y0-0.5)**2) - limit(1)
+    a0=0.75_WP - y0**2 + limit(1) + limit(2)
+    aP=y0
+    aP2=1._WP - aM - a0 - aP
+
+    ! Use limit(2) and limit(3) to remesh particle i+1 (they are limitator at i+1-1/2, i+1+1/2)
+    bP  = -y0_bis
+    bP2 = 0.75_WP - y0_bis**2 + limit(2) + limit(3)
+    bP3 = 0.5*((y0_bis+0.5)**2) - limit(3)
+    b0 = 1._WP - bP - bP2 - bP3
+
+    ! Remeshing
+    jM = modulo(j0-2,mesh_sc%N(dir))+1  ! j0-1
+    jP = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
+    jP2= modulo(j0+1,mesh_sc%N(dir))+1  ! j0+2
+    jP3= modulo(j0+2,mesh_sc%N(dir))+1  ! j0+3
+    j0 = modulo(j0-1,mesh_sc%N(dir))+1  ! j0
+    buffer(jM)= buffer(jM)  +aM *sca
+    buffer(j0)= buffer(j0)  +a0 *sca+b0 *scaP
+    buffer(jP)= buffer(jP)  +aP *sca+bP *scaP
+    buffer(jP2)=buffer(jP2) +aP2*sca+bP2*scaP
+    buffer(jP3)=buffer(jP3)         +bP3*scaP
+
+end subroutine AC_remesh_limitO2_tag_LC_array
+
+
+!> Corrected remeshing formula for the transition from a left block to a centered block with a different index (tagged particles)
+!!    @param[in]       pos_adim= dimensionless position of the first particle
+!!    @param[in]       sca     = scalar advected by the first particle
+!!    @param[in]       posP_ad = dimensionless position of the second particle
+!!    @param[in]       scaP    = scalar advected by the second particle
+!!    @param[in]       limit   = limitator function values associated to the right and the left scalar variations
+!!    @param[in,out]   buffer  = temporarily remeshed scalar field
+!! @details
+!!    Remeshing formula devoted to tagged particles.
+!!    The particle group passed as argument is composed of the end of a block
+!!    and of the beginning of the next one. The first particle belongs to a
+!!    left block and the last one to a centered block. The blocks have
+!!    different indices (tagged particles), so a corrected formula must be used.
+subroutine AC_remesh_limitO2_tag_LC_pter(pos_adim, sca, posP_ad, scaP, limit, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    real(WP), intent(in)                            :: pos_adim, sca, posP_ad, scaP
+    real(WP), dimension(3), intent(in)              :: limit    ! to remesh particles of indices i, i+1, limitator must be known at i-1/2, i+1/2=(i+1)-1/2 and (i+1)+1/2
+    type(real_pter), dimension(:), intent(inout)    :: buffer
+    ! Other local variables
+    integer     :: jM, j0, jP, jP2, jP3             ! indices of the nearest mesh points
+                                                    ! (they depend on the block type)
+    integer     :: j0_bis                           ! index of the nearest mesh point for the indP=ind+1 particle
+    real(WP)    :: aM, a0, aP, aP2, b0, bP, bP2, bP3 ! interpolation weights for the particles
+    real(WP)    :: y0, y0_bis                       ! dimensionless distances to mesh points
+
+
+    ! Indice of mesh point used in order to remesh
+    j0 = floor(pos_adim)
+    !j0 = floor(pos/d_sc(2))
+    j0_bis = nint(posP_ad)
+    !j0_bis = nint(posP/d_sc(2))
+    jM=j0-1
+    jP=j0+1
+    jP2=j0+2
+    jP3=j0+3
+
+    ! Distance to mesh point
+    y0 = (pos_adim - real(j0, WP))
+    !y0 = (pos - real(j0, WP)*d_sc(2))/d_sc(2)
+    y0_bis = (posP_ad - real(j0_bis, WP))
+    !y0_bis = (posP - real(j0_bis, WP)*d_sc(2))/d_sc(2)
+
+    ! Interpolation weight
+    ! Use limit(1) and limit(2) to remesh particle i (they are limitator at i-1/2, i+1/2)
+    aM = 0.5*((y0-0.5)**2) - limit(1)
+    a0=0.75_WP - y0**2 + limit(1) + limit(2)
+    aP=y0
+    aP2=1._WP - aM - a0 - aP
+
+    ! Use limit(2) and limit(3) to remesh particle i+1 (they are limitator at i+1-1/2, i+1+1/2)
+    bP  = -y0_bis
+    bP2 = 0.75_WP - y0_bis**2 + limit(2) + limit(3)
+    bP3 = 0.5*((y0_bis+0.5)**2) - limit(3)
+    b0 = 1._WP - bP - bP2 - bP3
+
+    ! Remeshing
+    buffer(jM)%pter= buffer(jM)%pter+aM*sca
+    buffer(j0)%pter= buffer(j0)%pter+a0*sca+b0*scaP
+    buffer(jP)%pter= buffer(jP)%pter+aP*sca+bP*scaP
+    buffer(jP2)%pter=buffer(jP2)%pter+aP2*sca+bP2*scaP
+    buffer(jP3)%pter=buffer(jP3)%pter+bP3*scaP
+
+end subroutine AC_remesh_limitO2_tag_LC_pter
+
+
+! ============================================================
+! ============     Lambda 4 corrected formula     ============
+! ============================================================
+
+!> Left remeshing formula of order 4 - array version
+!!      @param[in]       dir     = current direction
+!!      @param[in]       pos_adim= dimensionless particle position
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporarily remeshed scalar field
+subroutine AC_remesh_O4_left_array(dir, pos_adim, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    integer, intent(in)                     :: dir
+    real(WP), intent(in)                    :: pos_adim, sca
+    real(WP), dimension(:), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: j0, j1                   ! indices of the nearest mesh points
+    real(WP)    :: bM2, bM, b0, bP, bP2     ! interpolation weights for the particle
+    real(WP)    :: y0                       ! dimensionless distance to mesh points
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+    !j0 = floor(pos/d_sc(2))
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! Interpolation weights
+    !bM2=(y0-2.)*(y0-1.)*y0*(y0+1.)/24.0
+    bM2=y0*(2.+y0*(-1.+y0*(-2.+y0)))/24.0
+    !bM =(2.-y0)*(y0-1.)*y0*(y0+2.)/6.0
+    bM =y0*(-4.+y0*(4.+y0*(1.-y0)))/6.0
+    !bP =(2.-y0)*y0*(y0+1.)*(y0+2.)/6.0
+    bP =y0*(4+y0*(4-y0*(1.+y0)))/6.0
+    !bP2=(y0-1.)*y0*(y0+1.)*(y0+2.)/24.0
+    bP2=y0*(-2.+y0*(-1.+y0*(2.+y0)))/24.0
+    !b0 =(y0-2.)*(y0-1.)*(y0+1.)*(y0+2.)/4.0
+    b0 = 1. -(bM2+bM+bP+bP2)
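+    ! Computing b0 as the complement of the four other weights enforces
+    ! bM2+bM+b0+bP+bP2 = 1 exactly, so the remeshing conserves the scalar
+    ! whatever rounding occurs in the Horner evaluations above.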
+
+    ! remeshing
+    j1 = modulo(j0-3,mesh_sc%N(dir))+1  ! j0-2
+    buffer(j1) = buffer(j1) + bM2*sca
+    j1 = modulo(j0-2,mesh_sc%N(dir))+1  ! j0-1
+    buffer(j1) = buffer(j1) + bM*sca
+    j1 = modulo(j0-1,mesh_sc%N(dir))+1  ! j0
+    buffer(j1) = buffer(j1) + b0*sca
+    j1 = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
+    buffer(j1) = buffer(j1) + bP*sca
+    j1 = modulo(j0+1,mesh_sc%N(dir))+1  ! j0+2
+    buffer(j1) = buffer(j1) + bP2*sca
+
+end subroutine AC_remesh_O4_left_array
+
+!> Left remeshing formula of order 4 - overload for an array of pointers
+!!      @param[in]       pos_adim= dimensionless particle position
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporarily remeshed scalar field
+subroutine AC_remesh_O4_left_pter(pos_adim, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    real(WP), intent(in)                                :: pos_adim, sca
+    type(real_pter), dimension(:), intent(inout)        :: buffer
+    ! Other local variables
+    integer     :: j0                       ! index of the nearest mesh point
+    real(WP)    :: bM2, bM, b0, bP, bP2     ! interpolation weights for the particle
+    real(WP)    :: y0                       ! dimensionless distance to mesh points
+    ! Mesh point used in remeshing formula
+    j0 = floor(pos_adim)
+    !j0 = floor(pos/d_sc(2))
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! Interpolation weights
+    !bM2=(y0-2.)*(y0-1.)*y0*(y0+1.)/24.0
+    bM2=y0*(2.0_WP+y0*(-1.0_WP+y0*(-2.0_WP+y0)))/24.0_WP
+    !bM =(2.-y0)*(y0-1.)*y0*(y0+2.)/6.0
+    bM =y0*(-4.0_WP+y0*(4._WP+y0*(1._WP-y0)))/6.0_WP
+    !bP =(2.-y0)*y0*(y0+1.)*(y0+2.)/6.0
+    bP =y0*(4._WP+y0*(4._WP-y0*(1._WP+y0)))/6._WP
+    !bP2=(y0-1.)*y0*(y0+1.)*(y0+2.)/24.0
+    bP2=y0*(-2._WP+y0*(-1._WP+y0*(2._WP+y0)))/24._WP
+    !b0 =(y0-2.)*(y0-1.)*(y0+1.)*(y0+2.)/4.0
+    b0 = 1._WP -(bM2+bM+bP+bP2)
+
+    ! remeshing
+    buffer(j0-2)%pter = buffer(j0-2)%pter  + bM2*sca
+    buffer(j0-1)%pter = buffer(j0-1)%pter  + bM*sca
+    buffer(j0  )%pter = buffer(j0  )%pter  + b0*sca
+    buffer(j0+1)%pter = buffer(j0+1)%pter  + bP*sca
+    buffer(j0+2)%pter = buffer(j0+2)%pter  + bP2*sca
+
+end subroutine AC_remesh_O4_left_pter
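+
+! Note on boundary handling: the *_array versions wrap every index with
+! modulo(...,mesh_sc%N(dir))+1 and thus enforce periodicity themselves, while
+! the *_pter versions index the buffer directly from j0-2 to j0+2; the array
+! of pointers is assumed to be built so that out-of-block indices already
+! point to the right (possibly remote) cells.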
+
+!> Centered remeshing formula of order 4 - array version
+!!      @param[in]       dir     = current direction
+!!      @param[in]       pos_adim= dimensionless particle position
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporarily remeshed scalar field
+subroutine AC_remesh_O4_center_array(dir, pos_adim, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/output
+    integer, intent(in)                     :: dir
+    real(WP), intent(in)                    :: pos_adim, sca
+    real(WP), dimension(:), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: j0,j1                    ! indices of the nearest mesh points
+    real(WP)    :: bM2, bM, b0, bP, bP2     ! interpolation weights for the particle
+    real(WP)    :: y0                       ! dimensionless distance to mesh points
+    ! Mesh point used in remeshing formula
+    j0 = nint(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! Interpolation weights
+    !bM2=(y0-2.)*(y0-1.)*y0*(y0+1.)/24.0
+    bM2=y0*(2._WP+y0*(-1._WP+y0*(-2._WP+y0)))/24._WP
+    !bM =(2.-y0)*(y0-1.)*y0*(y0+2.)/6.0
+    bM =y0*(-4._WP+y0*(4._WP+y0*(1._WP-y0)))/6._WP
+    !bP =(2.-y0)*y0*(y0+1.)*(y0+2.)/6.0
+    bP =y0*(4._WP+y0*(4._WP-y0*(1._WP+y0)))/6._WP
+    !bP2=(y0-1.)*y0*(y0+1.)*(y0+2.)/24.0
+    bP2=y0*(-2._WP+y0*(-1._WP+y0*(2._WP+y0)))/24._WP
+    !b0 =(y0-2.)*(y0-1.)*(y0+1.)*(y0+2.)/4.0
+    b0 = 1._WP -(bM2+bM+bP+bP2)
+
+    ! remeshing
+    j1 = modulo(j0-3,mesh_sc%N(dir))+1  ! j0-2
+    buffer(j1) = buffer(j1) + bM2*sca
+    j1 = modulo(j0-2,mesh_sc%N(dir))+1  ! j0-1
+    buffer(j1) = buffer(j1) + bM*sca
+    j1 = modulo(j0-1,mesh_sc%N(dir))+1  ! j0
+    buffer(j1) = buffer(j1) + b0*sca
+    j1 = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
+    buffer(j1) = buffer(j1) + bP*sca
+    j1 = modulo(j0+1,mesh_sc%N(dir))+1  ! j0+2
+    buffer(j1) = buffer(j1) + bP2*sca
+
+end subroutine AC_remesh_O4_center_array
+
+!> Centered remeshing formula of order 4 - pointer version
+!!      @param[in]       pos_adim= dimensionless particle position
+!!      @param[in]       sca     = scalar advected by the particle
+!!      @param[in,out]   buffer  = temporarily remeshed scalar field
+subroutine AC_remesh_O4_center_pter(pos_adim, sca, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/output
+    real(WP), intent(in)                                        :: pos_adim, sca
+    type(real_pter), dimension(:), intent(inout)                :: buffer
+    ! Other local variables
+    integer     :: j0                       ! index of the nearest mesh point
+    real(WP)    :: bM2, bM, b0, bP, bP2     ! interpolation weights for the particle
+    real(WP)    :: y0                       ! dimensionless distance to mesh points
+    ! Mesh point used in remeshing formula
+    j0 = nint(pos_adim)
+
+    ! Distance to mesh points
+    y0 = (pos_adim - real(j0, WP))
+
+    ! Interpolation weights
+    !bM2=(y0-2.)*(y0-1.)*y0*(y0+1.)/24.0
+    bM2=y0*(2._WP+y0*(-1._WP+y0*(-2._WP+y0)))/24._WP
+    !bM =(2.-y0)*(y0-1.)*y0*(y0+2.)/6.0
+    bM =y0*(-4._WP+y0*(4._WP+y0*(1._WP-y0)))/6._WP
+    !bP =(2.-y0)*y0*(y0+1.)*(y0+2.)/6.0
+    bP =y0*(4._WP+y0*(4._WP-y0*(1._WP+y0)))/6._WP
+    !bP2=(y0-1.)*y0*(y0+1.)*(y0+2.)/24.0
+    bP2=y0*(-2._WP+y0*(-1._WP+y0*(2._WP+y0)))/24._WP
+    !b0 =(y0-2.)*(y0-1.)*(y0+1.)*(y0+2.)/4.0
+    b0 = 1._WP -(bM2+bM+bP+bP2)
+
+    ! remeshing
+    buffer(j0-2)%pter = buffer(j0-2)%pter   + bM2*sca
+    buffer(j0-1)%pter = buffer(j0-1)%pter   + bM*sca
+    buffer(j0  )%pter = buffer(j0  )%pter   + b0*sca
+    buffer(j0+1)%pter = buffer(j0+1)%pter   + bP*sca
+    buffer(j0+2)%pter = buffer(j0+2)%pter   + bP2*sca
+
+end subroutine AC_remesh_O4_center_pter
+
+
+!> Order 4 corrected remeshing formula for the transition from a centered block to a left block with a different index (tagged particles)
+!! - version for an array of reals.
+!!    @param[in]       dir     = current direction
+!!    @param[in]       posM_ad = dimensionless position of the first particle
+!!    @param[in]       scaM    = scalar advected by the first particle
+!!    @param[in]       pos_adim= dimensionless position of the second particle
+!!    @param[in]       sca     = scalar advected by the second particle
+!!    @param[in]       posP_ad = dimensionless position of the third particle
+!!    @param[in]       scaP    = scalar advected by the third particle
+!!    @param[in]       posP2_ad= dimensionless position of the fourth (and last) particle
+!!    @param[in]       scaP2   = scalar advected by the fourth particle
+!!    @param[in,out]   buffer  = temporarily remeshed scalar field
+!! @details
+!!    Remeshing formula devoted to tagged particles.
+!!    The particle group passed as argument is composed of the end of a block
+!!    and of the beginning of the next one. The first particles belong to a
+!!    centered block and the last ones to a left block. The blocks have
+!!    different indices (tagged particles), so a corrected formula must be used.
+subroutine AC_remesh_O4_tag_CL_array(dir, posM_ad, scaM, pos_adim, sca, posP_ad, scaP, posP2_ad, scaP2, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    integer, intent(in)                     :: dir
+    real(WP), intent(in)                    :: pos_adim, sca, posP_ad, scaP
+    real(WP), intent(in)                    :: posM_ad, scaM, posP2_ad, scaP2
+    real(WP), dimension(:), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: jM, j0, jP, jP2, j1      ! indices of the nearest mesh points
+                                            ! (they depend on the block type)
+    real(WP)    :: aM3, aM2, aM, a0         ! interpolation weights for the particles
+    real(WP)    :: bM2, bM, b0, bP          ! interpolation weights for the particles
+    real(WP)    :: cM, c0, cP, cP2          ! interpolation weights for the particles
+    real(WP)    :: e0, eP, eP2, eP3         ! interpolation weights for the particles
+    real(WP)    :: yM, y0, yP, yP2          ! dimensionless distance to mesh points for each particle
+
+    ! Indice of mesh point used in order to remesh
+    jM = nint(posM_ad)
+    j0 = nint(pos_adim)
+    jP = floor(posP_ad)
+    jP2= floor(posP2_ad)
+
+    ! Distance to mesh point
+    yM = (posM_ad  - real(jM, WP))
+    y0 = (pos_adim - real(j0, WP))
+    yP = (posP_ad  - real(jP, WP))
+    yP2= (posP2_ad - real(jP2, WP))
+
+    ! Interpolation weights
+    !aM3=(yM-2.)*(yM-1.)*yM*(yM+1.)/24.0
+    aM3=yM*(2.+yM*(-1.+yM*(-2.+yM)))/24.0
+    !aM2=(2.-yM)*(yM-1.)*yM*(yM+2.)/6.0
+    aM2=yM*(-4.+yM*(4.+yM*(1.-yM)))/6.0
+    !aM =(yM-2.)*(yM-1.)*(yM+1.)*(yM+2.)/4.0
+    aM =(4.+(yM**2)*(-5.+yM**2))/4.0
+    !a0 =((2.-yM)*yM*(yM+1.)*(yM+2.)/6.0) + ((yM-1.)*yM*(yM+1.)*(yM+2.)/24.0)
+    a0 = 1. - (aM3+aM2+aM)
+
+    !bM2=(y0-2.)*(y0-1.)*y0*(y0+1.)/24.0
+    bM2=y0*(2.+y0*(-1.+y0*(-2.+y0)))/24.0
+    !bM =(2.-y0)*(y0-1.)*y0*(y0+2.)/6.0
+    bM =y0*(-4.+y0*(4.+y0*(1.-y0)))/6.0
+    !bP =((y0+1)-1.)*(y0+1)*((y0+1)+1.)*((y0+1)+2.)/24.0
+    bP =y0*(6.+y0*(11+y0*(6+y0)))/24.0
+    !b0 =((y0-2.)*(y0-1.)*(y0+1.)*(y0+2.)/4.0) + ((2.-y0)*y0*(y0+1.)*(y0+2.)/6.0) &
+    !        & + ((y0-1.)*y0*(y0+1.)*(y0+2.)/24.0) - bP
+    b0 = 1. - (bM2+bM+bP)
+
+    !cM =((yP-1.)-2.)*((yP-1.)-1.)*(yP-1.)*((yP-1.)+1.)/24.0
+    cM =yP*(-6.+yP*(11.+yP*(-6.+yP)))/24.0
+    !cP =(2.-yP)*yP*(yP+1.)*(yP+2.)/6.0
+    cP =yP*(4.+yP*(4.-yP*(1.+yP)))/6.0
+    !cP2=(yP-1.)*yP*(yP+1.)*(yP+2.)/24.0
+    cP2=yP*(-2.+yP*(-1.+yP*(2.+yP)))/24.0
+    !c0 =((yP-2.)*(yP-1.)*yP*(yP+1.)/24.0)+((2.-yP)*(yP-1.)*yP*(yP+2.)/6.0) &
+    !        & + ((yP-2.)*(yP-1.)*(yP+1.)*(yP+2.)/4.0) - cM
+    c0 = 1. - (cM+cP+cP2)
+
+    !eP =(yP2-2.)*(yP2-1.)*(yP2+1.)*(yP2+2.)/4.0
+    eP =1.+((yP2**2)*(-5+yP2**2)/4.0)
+    !eP2=(2.-yP2)*yP2*(yP2+1.)*(yP2+2.)/6.0
+    eP2=yP2*(4.+yP2*(4.-yP2*(1+yP2)))/6.0
+    !eP3=(yP2-1.)*yP2*(yP2+1.)*(yP2+2.)/24.0
+    eP3=yP2*(-2.+yP2*(-1.+yP2*(2+yP2)))/24.0
+    !e0 =((yP2-2.)*(yP2-1.)*yP2*(yP2+1.)/24.0) + ((2.-yP2)*(yP2-1.)*yP2*(yP2+2.)/6.0)
+    e0 = 1. - (eP+eP2+eP3)
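+
+    ! Each weight family closes with a complement (a0, b0, c0, e0), so the four
+    ! particles scaM, sca, scaP and scaP2 each distribute a total weight of one
+    ! and the corrected formula remains conservative.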
+
+    ! -- remeshing --
+    ! j0-3
+    j1 = modulo(j0-4,mesh_sc%N(dir))+1
+    buffer(j1) = buffer(j1) + aM3*scaM
+    ! j0-2
+    j1 = modulo(j0-3,mesh_sc%N(dir))+1
+    buffer(j1) = buffer(j1) + aM2*scaM + bM2*sca
+    ! j0-1
+    j1 = modulo(j0-2,mesh_sc%N(dir))+1
+    buffer(j1) = buffer(j1) + aM*scaM  + bM*sca   + cM*scaP
+    ! j0
+    j1 = modulo(j0-1,mesh_sc%N(dir))+1
+    buffer(j1) = buffer(j1) + a0*scaM  + b0*sca   + c0*scaP  + e0*scaP2
+    ! j0+1
+    j1 = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
+    buffer(j1) = buffer(j1)            + bP*sca   + cP*scaP  + eP*scaP2
+    ! j0+2
+    j1 = modulo(j0+1,mesh_sc%N(dir))+1  ! j0+2
+    buffer(j1) = buffer(j1)                       + cP2*scaP + eP2*scaP2
+    ! j0+3
+    j1 = modulo(j0+2,mesh_sc%N(dir))+1  ! j0+3
+    buffer(j1) = buffer(j1)                                  + eP3*scaP2
+
+end subroutine AC_remesh_O4_tag_CL_array
+
+
+!> Order 4 corrected remeshing formula for the transition from a centered block to a left block with a different index (tagged particles)
+!! - version for an array of pointers.
+!!    @param[in]       posM_ad = dimensionless position of the first particle
+!!    @param[in]       scaM    = scalar advected by the first particle
+!!    @param[in]       pos_adim= dimensionless position of the second particle
+!!    @param[in]       sca     = scalar advected by the second particle
+!!    @param[in]       posP_ad = dimensionless position of the third particle
+!!    @param[in]       scaP    = scalar advected by the third particle
+!!    @param[in]       posP2_ad= dimensionless position of the fourth (and last) particle
+!!    @param[in]       scaP2   = scalar advected by the fourth particle
+!!    @param[in,out]   buffer  = temporarily remeshed scalar field
+!! @details
+!!    Remeshing formula devoted to tagged particles.
+!!    The particle group passed as argument is composed of the end of a block
+!!    and of the beginning of the next one. The first particles belong to a
+!!    centered block and the last ones to a left block. The blocks have
+!!    different indices (tagged particles), so a corrected formula must be used.
+subroutine AC_remesh_O4_tag_CL_pter(posM_ad, scaM, pos_adim, sca, posP_ad, scaP, posP2_ad, scaP2, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    real(WP), intent(in)                            :: pos_adim, sca, posP_ad, scaP
+    real(WP), intent(in)                            :: posM_ad, scaM, posP2_ad, scaP2
+    type(real_pter), dimension(:), intent(inout)    :: buffer
+    ! Other local variables
+    integer     :: jM, j0, jP, jP2          ! indices of the nearest mesh points
+                                            ! (they depend on the block type)
+    real(WP)    :: aM3, aM2, aM, a0         ! interpolation weights for the particles
+    real(WP)    :: bM2, bM, b0, bP          ! interpolation weights for the particles
+    real(WP)    :: cM, c0, cP, cP2          ! interpolation weights for the particles
+    real(WP)    :: e0, eP, eP2, eP3         ! interpolation weights for the particles
+    real(WP)    :: yM, y0, yP, yP2          ! dimensionless distance to mesh points for each particle
+
+    ! Indice of mesh point used in order to remesh
+    jM = nint(posM_ad)
+    j0 = nint(pos_adim)
+    jP = floor(posP_ad)
+    jP2= floor(posP2_ad)
+
+    ! Distance to mesh point
+    yM = (posM_ad  - real(jM, WP))
+    y0 = (pos_adim - real(j0, WP))
+    yP = (posP_ad  - real(jP, WP))
+    yP2= (posP2_ad - real(jP2, WP))
+
+    ! Interpolation weights
+    !aM3=(yM-2.)*(yM-1.)*yM*(yM+1.)/24.0
+    aM3=yM*(2._WP+yM*(-1._WP+yM*(-2._WP+yM)))/24._WP
+    !aM2=(2.-yM)*(yM-1.)*yM*(yM+2.)/6.0
+    aM2=yM*(-4._WP+yM*(4._WP+yM*(1._WP-yM)))/6._WP
+    !aM =(yM-2.)*(yM-1.)*(yM+1.)*(yM+2.)/4.0
+    aM =(4._WP+(yM**2)*(-5._WP+yM**2))/4._WP
+    !a0 =((2.-yM)*yM*(yM+1.)*(yM+2.)/6.0) + ((yM-1.)*yM*(yM+1.)*(yM+2.)/24.0)
+    a0 = 1._WP - (aM3+aM2+aM)
+
+    !bM2=(y0-2.)*(y0-1.)*y0*(y0+1.)/24.0
+    bM2=y0*(2._WP+y0*(-1._WP+y0*(-2._WP+y0)))/24._WP
+    !bM =(2.-y0)*(y0-1.)*y0*(y0+2.)/6.0
+    bM =y0*(-4._WP+y0*(4._WP+y0*(1._WP-y0)))/6._WP
+    !bP =((y0+1)-1.)*(y0+1)*((y0+1)+1.)*((y0+1)+2.)/24.0
+    bP =y0*(6._WP+y0*(11._WP+y0*(6._WP+y0)))/24._WP
+    !b0 =((y0-2.)*(y0-1.)*(y0+1.)*(y0+2.)/4.0) + ((2.-y0)*y0*(y0+1.)*(y0+2.)/6.0) &
+    !        & + ((y0-1.)*y0*(y0+1.)*(y0+2.)/24.0) - bP
+    b0 = 1._WP - (bM2+bM+bP)
+
+    !cM =((yP-1.)-2.)*((yP-1.)-1.)*(yP-1.)*((yP-1.)+1.)/24.0
+    cM =yP*(-6._WP+yP*(11._WP+yP*(-6._WP+yP)))/24._WP
+    !cP =(2.-yP)*yP*(yP+1.)*(yP+2.)/6.0
+    cP =yP*(4._WP+yP*(4._WP-yP*(1._WP+yP)))/6._WP
+    !cP2=(yP-1.)*yP*(yP+1.)*(yP+2.)/24.0
+    cP2=yP*(-2._WP+yP*(-1._WP+yP*(2._WP+yP)))/24._WP
+    !c0 =((yP-2.)*(yP-1.)*yP*(yP+1.)/24.0)+((2.-yP)*(yP-1.)*yP*(yP+2.)/6.0) &
+    !        & + ((yP-2.)*(yP-1.)*(yP+1.)*(yP+2.)/4.0) - cM
+    c0 = 1._WP - (cM+cP+cP2)
+
+    !eP =(yP2-2.)*(yP2-1.)*(yP2+1.)*(yP2+2.)/4.0
+    eP =1._WP+((yP2**2)*(-5._WP+yP2**2)/4._WP)
+    !eP2=(2.-yP2)*yP2*(yP2+1.)*(yP2+2.)/6.0
+    eP2=yP2*(4._WP+yP2*(4._WP-yP2*(1._WP+yP2)))/6._WP
+    !eP3=(yP2-1.)*yP2*(yP2+1.)*(yP2+2.)/24.0
+    eP3=yP2*(-2._WP+yP2*(-1._WP+yP2*(2._WP+yP2)))/24._WP
+    !e0 =((yP2-2.)*(yP2-1.)*yP2*(yP2+1.)/24.0) + ((2.-yP2)*(yP2-1.)*yP2*(yP2+2.)/6.0)
+    e0 = 1._WP - (eP+eP2+eP3)
+
+    ! remeshing
+    buffer(j0-3)%pter = buffer(j0-3)%pter +aM3*scaM
+    buffer(j0-2)%pter = buffer(j0-2)%pter +aM2*scaM +bM2*sca
+    buffer(j0-1)%pter = buffer(j0-1)%pter + aM*scaM + bM*sca  + cM*scaP
+    buffer(j0  )%pter = buffer(j0  )%pter + a0*scaM + b0*sca  + c0*scaP + e0*scaP2
+    buffer(j0+1)%pter = buffer(j0+1)%pter           + bP*sca  + cP*scaP + eP*scaP2
+    buffer(j0+2)%pter = buffer(j0+2)%pter                     +cP2*scaP +eP2*scaP2
+    buffer(j0+3)%pter = buffer(j0+3)%pter                               +eP3*scaP2
+
+end subroutine AC_remesh_O4_tag_CL_pter
+
+
+!> Corrected remeshing formula of order 3 for the transition from a left block to a centered
+!! block with a different index (tagged particles). Use it for the lambda 4 corrected scheme.
+!! - version for an array of reals.
+!!    @param[in]       dir     = current direction
+!!    @param[in]       posM_ad = dimensionless position of the first particle
+!!    @param[in]       scaM    = scalar advected by the first particle
+!!    @param[in]       pos_adim= dimensionless position of the second particle (the last of the first block)
+!!    @param[in]       sca     = scalar advected by the second particle
+!!    @param[in]       posP_ad = dimensionless position of the third particle (which is the first of the second block)
+!!    @param[in]       scaP    = scalar advected by the third particle
+!!    @param[in]       posP2_ad= dimensionless position of the fourth (and last) particle
+!!    @param[in]       scaP2   = scalar advected by the fourth particle
+!!    @param[in,out]   buffer  = temporarily remeshed scalar field
+!! @details
+!!    Remeshing formula devoted to tagged particles.
+!!    The particle group passed as argument is composed of the end of a block
+!!    and of the beginning of the next one. The first particles belong to a
+!!    left block and the last ones to a centered block. The blocks have
+!!    different indices (tagged particles), so a corrected formula must be used.
+subroutine AC_remesh_O4_tag_LC_array(dir, posM_ad, scaM, pos_adim, sca, posP_ad, scaP, posP2_ad, scaP2, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    integer, intent(in)                     :: dir
+    real(WP), intent(in)                    :: pos_adim, sca, posP_ad, scaP
+    real(WP), intent(in)                    :: posM_ad, scaM, posP2_ad, scaP2
+    real(WP), dimension(:), intent(inout)   :: buffer
+    ! Other local variables
+    integer     :: jM, j0, jP, jP2          ! indices of the nearest mesh points
+                                            ! (they depend on the block type)
+    integer     :: j1                       ! same as the previous indices, but after the periodic modulo
+    real(WP)    :: aM3, aM2, aM, a0, aP, aP2 ! interpolation weights for the particles
+    real(WP)    :: bM2, bM, b0, bP, bP2, bP3 ! interpolation weights for the particles
+    real(WP)    :: cM, c0, cP, cP2, cP3, cP4 ! interpolation weights for the particles
+    real(WP)    :: e0, eP, eP2, eP3, eP4, eP5 ! interpolation weights for the particles
+    real(WP)    :: yM, y0, yP, yP2          ! dimensionless distance to mesh points for each particle
+
+
+    ! Indice of mesh point used in order to remesh
+    jM = floor(posM_ad)
+    j0 = floor(pos_adim)
+    jP = nint(posP_ad)
+    jP2= nint(posP2_ad)
+
+    ! Distance to mesh point
+    yM = (posM_ad  - real(jM, WP))
+    y0 = (pos_adim - real(j0, WP))
+    yP = (posP_ad  - real(jP, WP))
+    yP2= (posP2_ad - real(jP2, WP))
+
+    ! Interpolation weights
+    !aM3=(yM-2.)*(yM-1.)*yM*(yM+1.)/24.0
+    aM3=yM*(2.+yM*(-1.+yM*(-2.+yM)))/24.0
+    !aM2=(2.-yM)*(yM-1.)*yM*(yM+2.)/6.0
+    aM2 =yM*(-4.+yM*(4.+yM*(1.-yM)))/6.0
+    !aM =(yM-2.)*(yM-1.)*(yM+1.)*(yM+2.)/4.0
+    aM =(4.+(yM**2)*(-5.+yM**2))/4.0
+    !a0 =((2.-yM)*yM*(yM+1.)*(yM+2.)/6.0)
+    a0 =yM*(4+yM*(4-yM*(1.+yM)))/6.0
+    !aP2=(((yM-1.)-1.)*(yM-1.)*((yM-1.)+1.)*((yM-1.)+2.)/24.0)
+    !aP2=yM*(yM-2.)*(yM-1.)*(yM+1.)/24.0
+    aP2=aM3
+    !aP =((yM-1.)*yM*(yM+1.)*(yM+2.)/24.0) - aP2
+    !aP = 1.0 - (aM3+aM2+aM+a0+aP2)
+    aP = 1.0 - (2.*aM3+aM2+aM+a0)
+
+    !bM2=(y0-2.)*(y0-1.)*y0*(y0+1.)/24.0
+    bM2=y0*(2.+y0*(-1.+y0*(-2.+y0)))/24.0
+    !bM =(2.-y0)*(y0-1.)*y0*(y0+2.)/6.0
+    bM =y0*(-4.+y0*(4.+y0*(1.-y0)))/6.0
+    !b0 =(y0-2.)*(y0-1.)*(y0+1.)*(y0+2.)/4.0
+    b0 =(4.+(y0**2)*(-5.+y0**2))/4.0
+    !bP2=(2.-(y0-1.))*(y0-1.)*((y0-1.)+1.)*((y0-1.)+2.)/6.0
+    !bP2=y0*(3.-y0)*(y0-1.)*(y0+1.)/6.0
+    bP2=y0*(-3.+y0*(1.+y0*(3.-y0)))/6.0
+    !bP3=((y0-1.)-1.)*(y0-1.)*((y0-1.)+1.)*((y0-1.)+2.)/24.0
+    !bP3=y0*(y0-2.)*(y0-1.)*(y0+1.)/24.0
+    bP3 = bM2
+    !bP =(2.-y0)*y0*(y0+1.)*(y0+2.)/6.0 + ((y0-1.)*y0*(y0+1.)*(y0+2.)/24.0) &
+    !       & - (bP2 + bP3)
+    !bP = 1.0 - (bM2 + bM + b0 + bP2 + bP3)
+    bP = 1.0 - (2*bM2 + bM + b0 + bP2)
+
+    !cM =((yP+1)-2.)*((yP+1)-1.)*(yP+1)*((yP+1)+1.)/24.0
+    cM =(yP-1.)*yP*(yP+1)*(yP+2.)/24.0
+    !cM =yP*(-2.+yP*(-1.+yP*(2.+yP)))/24.0
+    !c0 =(2.-(yP+1))*((yP+1)-1.)*(yP+1)*((yP+1)+2.)/6.0
+    !c0 =(1.-yP)*yP*(yP+1)*(yP+3.)/6.0
+    c0 =yP*(3.+yP*(1.-yP*(3.+yP)))/6.0
+    !cP2=(yP-2.)*(yP-1.)*(yP+1.)*(yP+2.)/4.0
+    cP2=(4.+(yP**2)*(-5.+yP**2))/4.0
+    !cP3=(2.-yP)*yP*(yP+1.)*(yP+2.)/6.0
+    cP3=yP*(4+yP*(4-yP*(1.+yP)))/6.0
+    !cP4=(yP-1.)*yP*(yP+1.)*(yP+2.)/24.0
+    cP4=cM
+    !cP =(yP-2.)*(yP-1.)*yP*(yP+1.)/24.0 + ((2.-yP)*(yP-1.)*yP*(yP+2.)/6.0) &
+    !        & - (cM + c0)
+    cP = 1.0 - (cM+c0+cP2+cP3+cP4)
+
+    !e0 =((yP2+1)-2.)*((yP2+1)-1.)*(yP2+1)*((yP2+1)+1.)/24.0
+    !e0 =(yP2-1.)*yP2*(yP2+1)*(yP2+2.)/24.0
+    e0 =yP2*(-2.+yP2*(-1.+yP2*(2.+yP2)))/24.0
+    !eP2=(2.-yP2)*(yP2-1.)*yP2*(yP2+2.)/6.0
+    eP2=yP2*(-4.+yP2*(4.+yP2*(1.-yP2)))/6.0
+    !eP3=(yP2-2.)*(yP2-1.)*(yP2+1.)*(yP2+2.)/4.0
+    eP3=(4.+(yP2**2)*(-5.+yP2**2))/4.0
+    !eP4=(2.-yP2)*yP2*(yP2+1.)*(yP2+2.)/6.0
+    eP4=yP2*(4+yP2*(4-yP2*(1.+yP2)))/6.0
+    !eP5=(yP2-1.)*yP2*(yP2+1.)*(yP2+2.)/24.0
+    eP5=e0
+    !eP =((yP2-2.)*(yP2-1.)*yP2*(yP2+1.)/24.0) - e0
+    eP = 1.0 - (e0+eP2+eP3+eP4+eP5)
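+
+    ! The identities aP2 = aM3, bP3 = bM2, cP4 = cM and eP5 = e0 used above follow
+    ! from the commented factored forms (the same polynomial evaluated at a shifted
+    ! argument); the complement weights aP, bP, cP and eP then make each particle's
+    ! weights sum to one, so the corrected formula conserves the scalar.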
+
+    ! remeshing
+    ! j0-3
+    j1 = modulo(j0-4,mesh_sc%N(dir))+1
+    buffer(j1) = buffer(j1) + aM3*scaM
+    ! j0-2
+    j1 = modulo(j0-3,mesh_sc%N(dir))+1
+    buffer(j1) = buffer(j1) + aM2*scaM + bM2*sca
+    ! j0-1
+    j1 = modulo(j0-2,mesh_sc%N(dir))+1
+    buffer(j1) = buffer(j1) + aM*scaM  + bM*sca   + cM*scaP
+    ! j0
+    j1 = modulo(j0-1,mesh_sc%N(dir))+1
+    buffer(j1) = buffer(j1) + a0*scaM  + b0*sca   + c0*scaP  + e0*scaP2
+    ! j0+1
+    j1 = modulo(j0,mesh_sc%N(dir))+1    ! j0+1
+    buffer(j1) = buffer(j1) + aP*scaM  + bP*sca   + cP*scaP  + eP*scaP2
+    ! j0+2
+    j1 = modulo(j0+1,mesh_sc%N(dir))+1  ! j0+2
+    buffer(j1) = buffer(j1)  + aP2*scaM + bP2*sca + cP2*scaP + eP2*scaP2
+    ! j0+3
+    j1 = modulo(j0+2,mesh_sc%N(dir))+1  ! j0+3
+    buffer(j1) = buffer(j1)             + bP3*sca + cP3*scaP + eP3*scaP2
+    ! j0+4
+    j1 = modulo(j0+3,mesh_sc%N(dir))+1  ! j0+4
+    buffer(j1) = buffer(j1)                       + cP4*scaP + eP4*scaP2
+    ! j0+5
+    j1 = modulo(j0+4,mesh_sc%N(dir))+1  ! j0+5
+    buffer(j1) = buffer(j1)                                  + eP5*scaP2
+
+end subroutine AC_remesh_O4_tag_LC_array
+
+
+!> Corrected remeshing formula of order 3 for the transition from a left block to a centered
+!! block with a different index (tagged particles). Use it for the lambda 4 corrected scheme.
+!! - version for an array of pointers.
+!!    @param[in]       posM_ad = dimensionless position of the first particle
+!!    @param[in]       scaM    = scalar advected by the first particle
+!!    @param[in]       pos_adim= dimensionless position of the second particle (the last of the first block)
+!!    @param[in]       sca     = scalar advected by the second particle
+!!    @param[in]       posP_ad = dimensionless position of the third particle (which is the first of the second block)
+!!    @param[in]       scaP    = scalar advected by the third particle
+!!    @param[in]       posP2_ad= dimensionless position of the fourth (and last) particle
+!!    @param[in]       scaP2   = scalar advected by the fourth particle
+!!    @param[in,out]   buffer  = temporarily remeshed scalar field
+!! @details
+!!    Remeshing formula devoted to tagged particles.
+!!    The particle group passed as argument is composed of the end of a block
+!!    and of the beginning of the next one. The first particles belong to a
+!!    left block and the last ones to a centered block. The blocks have
+!!    different indices (tagged particles), so a corrected formula must be used.
+subroutine AC_remesh_O4_tag_LC_pter(posM_ad, scaM, pos_adim, sca, posP_ad, scaP, posP2_ad, scaP2, buffer)
+
+    use cart_topology
+    use advec_variables ! contains info about solver parameters and others.
+
+    ! Input/Output
+    real(WP), intent(in)                            :: pos_adim, sca, posP_ad, scaP
+    real(WP), intent(in)                            :: posM_ad, scaM, posP2_ad, scaP2
+    type(real_pter), dimension(:), intent(inout)    :: buffer
+    ! Other local variables
+    integer     :: jM, j0, jP, jP2          ! indices of the nearest mesh points
+                                            ! (they depend on the block type)
+    real(WP)    :: aM3, aM2, aM, a0, aP, aP2 ! interpolation weights for the particles
+    real(WP)    :: bM2, bM, b0, bP, bP2, bP3 ! interpolation weights for the particles
+    real(WP)    :: cM, c0, cP, cP2, cP3, cP4 ! interpolation weights for the particles
+    real(WP)    :: e0, eP, eP2, eP3, eP4, eP5 ! interpolation weights for the particles
+    real(WP)    :: yM, y0, yP, yP2          ! dimensionless distance to mesh points for each particle
+
+
+    ! Indice of mesh point used in order to remesh
+    jM = floor(posM_ad)
+    j0 = floor(pos_adim)
+    jP = nint(posP_ad)
+    jP2= nint(posP2_ad)
+
+    ! Distance to mesh point
+    yM = (posM_ad  - real(jM, WP))
+    y0 = (pos_adim - real(j0, WP))
+    yP = (posP_ad  - real(jP, WP))
+    yP2= (posP2_ad - real(jP2, WP))
+
+    ! Interpolation weights
+    !aM3=(yM-2.)*(yM-1.)*yM*(yM+1.)/24.0
+    aM3=yM*(2._WP+yM*(-1._WP+yM*(-2._WP+yM)))/24._WP
+    !aM2=(2.-yM)*(yM-1.)*yM*(yM+2.)/6.0
+    aM2 =yM*(-4._WP+yM*(4._WP+yM*(1._WP-yM)))/6._WP
+    !aM =(yM-2.)*(yM-1.)*(yM+1.)*(yM+2.)/4.0
+    aM =(4._WP+(yM**2)*(-5._WP+yM**2))/4._WP
+    !a0 =((2.-yM)*yM*(yM+1.)*(yM+2.)/6.0)
+    a0 =yM*(4._WP+yM*(4._WP-yM*(1._WP+yM)))/6._WP
+    !aP2=(((yM-1.)-1.)*(yM-1.)*((yM-1.)+1.)*((yM-1.)+2.)/24.0)
+    !aP2=yM*(yM-2.)*(yM-1.)*(yM+1.)/24.0
+    aP2=aM3
+    !aP =((yM-1.)*yM*(yM+1.)*(yM+2.)/24.0) - aP2
+    !aP = 1.0 - (aM3+aM2+aM+a0+aP2)
+    aP = 1._WP - (2._WP*aM3+aM2+aM+a0)
+
+    !bM2=(y0-2.)*(y0-1.)*y0*(y0+1.)/24.0
+    bM2=y0*(2._WP+y0*(-1._WP+y0*(-2._WP+y0)))/24._WP
+    !bM =(2.-y0)*(y0-1.)*y0*(y0+2.)/6.0
+    bM =y0*(-4._WP+y0*(4._WP+y0*(1._WP-y0)))/6._WP
+    !b0 =(y0-2.)*(y0-1.)*(y0+1.)*(y0+2.)/4.0
+    b0 =(4._WP+(y0**2)*(-5._WP+y0**2))/4._WP
+    !bP2=(2.-(y0-1.))*(y0-1.)*((y0-1.)+1.)*((y0-1.)+2.)/6.0
+    !bP2=y0*(3.-y0)*(y0-1.)*(y0+1.)/6.0
+    bP2=y0*(-3._WP+y0*(1._WP+y0*(3._WP-y0)))/6._WP
+    !bP3=((y0-1.)-1.)*(y0-1.)*((y0-1.)+1.)*((y0-1.)+2.)/24.0
+    !bP3=y0*(y0-2.)*(y0-1.)*(y0+1.)/24.0
+    bP3 = bM2
+    !bP =(2.-y0)*y0*(y0+1.)*(y0+2.)/6.0 + ((y0-1.)*y0*(y0+1.)*(y0+2.)/24.0) &
+    !       & - (bP2 + bP3)
+    !bP = 1.0 - (bM2 + bM + b0 + bP2 + bP3)
+    bP = 1._WP - (2._WP*bM2 + bM + b0 + bP2)
+
+    !cM =((yP+1)-2.)*((yP+1)-1.)*(yP+1)*((yP+1)+1.)/24.0
+    !cM =(yP-1._WP)*yP*(yP+1._WP)*(yP+2._WP)/24._WP
+    cM =yP*(-2._WP+yP*(-1._WP+yP*(2._WP+yP)))/24._WP
+    !c0 =(2.-(yP+1))*((yP+1)-1.)*(yP+1)*((yP+1)+2.)/6.0
+    !c0 =(1.-yP)*yP*(yP+1)*(yP+3.)/6.0
+    c0 =yP*(3._WP+yP*(1._WP-yP*(3._WP+yP)))/6._WP
+    !cP2=(yP-2.)*(yP-1.)*(yP+1.)*(yP+2.)/4.0
+    cP2=(4._WP+(yP**2)*(-5._WP+yP**2))/4._WP
+    !cP3=(2.-yP)*yP*(yP+1.)*(yP+2.)/6.0
+    cP3=yP*(4._WP+yP*(4._WP-yP*(1._WP+yP)))/6._WP
+    !cP4=(yP-1.)*yP*(yP+1.)*(yP+2.)/24.0
+    cP4=cM
+    !cP =(yP-2.)*(yP-1.)*yP*(yP+1.)/24.0 + ((2.-yP)*(yP-1.)*yP*(yP+2.)/6.0) &
+    !        & - (cM + c0)
+    cP = 1._WP - (cM+c0+cP2+cP3+cP4)
+
+    !e0 =((yP2+1)-2.)*((yP2+1)-1.)*(yP2+1)*((yP2+1)+1.)/24.0
+    !e0 =(yP2-1.)*yP2*(yP2+1)*(yP2+2.)/24.0
+    e0 =yP2*(-2._WP+yP2*(-1._WP+yP2*(2._WP+yP2)))/24._WP
+    !eP2=(2.-yP2)*(yP2-1.)*yP2*(yP2+2.)/6.0
+    eP2=yP2*(-4._WP+yP2*(4._WP+yP2*(1._WP-yP2)))/6._WP
+    !eP3=(yP2-2.)*(yP2-1.)*(yP2+1.)*(yP2+2.)/4.0
+    eP3=(4._WP+(yP2**2)*(-5._WP+yP2**2))/4._WP
+    !eP4=(2.-yP2)*yP2*(yP2+1.)*(yP2+2.)/6.0
+    eP4=yP2*(4._WP+yP2*(4._WP-yP2*(1._WP+yP2)))/6._WP
+    !eP5=(yP2-1.)*yP2*(yP2+1.)*(yP2+2.)/24.0
+    eP5=e0
+    !eP =((yP2-2.)*(yP2-1.)*yP2*(yP2+1.)/24.0) - e0
+    eP = 1._WP - (e0+eP2+eP3+eP4+eP5)
+
+    ! remeshing
+    buffer(j0-3)%pter = buffer(j0-3)%pter +aM3*scaM
+    buffer(j0-2)%pter = buffer(j0-2)%pter +aM2*scaM +bM2*sca
+    buffer(j0-1)%pter = buffer(j0-1)%pter + aM*scaM + bM*sca  + cM*scaP
+    buffer(j0  )%pter = buffer(j0  )%pter + a0*scaM + b0*sca  + c0*scaP + e0*scaP2
+    buffer(j0+1)%pter = buffer(j0+1)%pter + aP*scaM + bP*sca  + cP*scaP + eP*scaP2
+    buffer(j0+2)%pter = buffer(j0+2)%pter +aP2*scaM +bP2*sca  +cP2*scaP +eP2*scaP2
+    buffer(j0+3)%pter = buffer(j0+3)%pter           +bP3*sca  +cP3*scaP +eP3*scaP2
+    buffer(j0+4)%pter = buffer(j0+4)%pter                     +cP4*scaP +eP4*scaP2
+    buffer(j0+5)%pter = buffer(j0+5)%pter                               +eP5*scaP2
+
+end subroutine AC_remesh_O4_tag_LC_pter
+
+
+end module advec_remeshing_lambda
+!> @}
diff --git a/HySoP/src/scalesReduced/particles/advec_type.f90 b/HySoP/src/scalesReduced/particles/advec_type.f90
new file mode 100644
index 0000000000000000000000000000000000000000..6a799123bd6427c429097e42273d3886ce2b140d
--- /dev/null
+++ b/HySoP/src/scalesReduced/particles/advec_type.f90
@@ -0,0 +1,198 @@
+!USEFORTEST advec
+!> @addtogroup part
+!! @{
+!------------------------------------------------------------------------------
+!
+! MODULE: advec_abstract_proc
+!
+!
+! DESCRIPTION:
+!> The module ``advec_abstract_proc'' gathers all the abstract procedure profiles
+!! used by the different advection modules. It allows these function/procedure
+!! profiles to be shared, so that procedure arguments and pointers can be used safely.
+!!
+!! This module is not supposed to be used by the main code but only by the other
+!! advection modules. More precisely, an end user must only use the generic "advec"
+!! module, which contains all the interfaces needed to initialize the solver (e.g.
+!! choosing the remeshing formula and the dimension splitting) and to solve the
+!! advection equation with the particle method.
+!!
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advec_abstract_proc
+
+    implicit none
+
+
+    ! --- Abstract profile of subroutine used to initialize particle velocity ---
+    ! Such a procedure fills p_V, the velocity of the particles of the current
+    ! group of lines, from the velocity field V_comp.
+    abstract interface
+      subroutine AC_init_p_V(V_comp, j, k, Gsize, p_V)
+
+        use precision_tools
+        implicit none
+
+        ! Input/Output
+        integer, intent(in)                       :: j,k
+        integer, dimension(2), intent(in)         :: Gsize
+        real(WP), dimension(:,:,:),intent(out)    :: p_V
+        real(WP), dimension(:,:,:), intent(in)    :: V_comp
+
+      end subroutine AC_init_p_V
+    end interface
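+
+    ! Illustrative sketch (not part of the library): a client module can declare a
+    ! procedure pointer matching one of these abstract profiles and bind it at run
+    ! time to a concrete routine, eg (my_init_p_V is a hypothetical implementation):
+    !
+    !   procedure(AC_init_p_V), pointer :: init_velo => null()
+    !   ...
+    !   init_velo => my_init_p_V
+    !   call init_velo(V_comp, j, k, Gsize, p_V)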
+
+    ! --- Abstract profile of subroutine used as wrapper for remeshing ---
+    ! Such a procedure will call all the other needed subroutines to
+    ! remesh in a buffer (the procedure itself uses an AC_remesh_line_pter subroutine)
+    ! and to redistribute this buffer into the scalar field (dealing with
+    ! all the communication)
+    abstract interface
+        subroutine AC_remesh(direction, ind_group, gs, p_pos_adim, p_V, j, k, scal, dt)
+
+            use precision_tools
+            implicit none
+
+            ! Input/Output
+            integer, intent(in)                         :: direction
+            integer, dimension(2), intent(in)           :: ind_group
+            integer, dimension(2), intent(in)           :: gs
+            integer, intent(in)                         :: j, k
+            real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! non-dimensional particle positions
+            real(WP), dimension(:,:,:), intent(in)      :: p_V          ! particle velocity
+            real(WP), dimension(:,:,:), intent(inout)   :: scal
+            real(WP), intent(in)                        :: dt
+
+        end subroutine AC_remesh
+    end interface
+
+
+    ! --- Abstract profile of subroutine used to compute the limitator function ---
+    ! Note that such a function actually computes limitator/8, as it is always
+    ! this fraction which appears in the remeshing polynomials (thus dividing
+    ! the limitator function by 8 once avoids having to do it several times later)
+    abstract interface
+        !!    @param[in]        gp_s        = size of a group (ie number of lines it gathers along the two other directions)
+        !!    @param[in]        ind_group   = coordinates of the current group of lines
+        !!    @param[in]        p_pos       = particle positions
+        !!    @param[in]        scalar      = scalar advected by the particles
+        !!    @param[out]       limit       = limitator function
+        subroutine advec_limitator_group(gp_s, ind_group, j, k, p_pos, &
+                & scalar, limit)
+
+            use precision_tools
+            implicit none
+
+            integer, dimension(2),intent(in)                            :: gp_s         ! group size
+            integer, dimension(2), intent(in)                           :: ind_group    ! group index
+            integer, intent(in)                                         :: j,k          ! block coordinates
+            real(WP), dimension(:,:,:), intent(in)                      :: p_pos        ! particle position
+            real(WP), dimension(:,:,:), intent(in)                      :: scalar       ! scalar field to advect
+            real(WP), dimension(:,:,:), intent(out)                     :: limit        ! limitator function
+
+        end subroutine advec_limitator_group
+    end interface
+
+    ! --- Abstract profile of subroutine used to remesh scalar inside a buffer - lambda formula ---
+    abstract interface
+        subroutine remesh_in_buffer_type(gs, j, k, ind_min, p_pos_adim, bl_type, bl_tag, send_min, send_max, &
+        & scalar, buffer, pos_in_buffer)
+
+            use precision_tools
+            implicit none
+
+            ! Input/Output
+            integer, dimension(2), intent(in)           :: gs
+            integer, intent(in)                         :: j, k
+            integer, intent(in)                         :: ind_min
+            real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! non-dimensional particle positions
+            logical, dimension(:,:,:), intent(in)       :: bl_type      ! is the particle block a center block or a left one?
+            logical, dimension(:,:,:), intent(in)       :: bl_tag       ! indices of tagged particles
+            integer, dimension(:,:), intent(in)         :: send_min     ! minimal distance between the local process and the processes which send it information
+            integer, dimension(:,:), intent(in)         :: send_max     ! maximal distance between the local process and the processes which send it information
+            real(WP), dimension(:,:,:), intent(inout)   :: scalar       ! the initial scalar field transported by particles
+            real(WP),dimension(:), intent(out), target  :: buffer       ! buffer where particles are remeshed
+            integer, dimension(:), intent(inout)        :: pos_in_buffer! describes how the one-dimensional array "buffer" is split
+                                                                        ! into parts corresponding to different processes
+
+        end subroutine remesh_in_buffer_type
+    end interface
+
+    ! --- Abstract profile of subroutine used to remesh scalar inside a buffer - limited lambda formula ---
+    abstract interface
+        subroutine remesh_in_buffer_limit(gs, j, k, ind_min, p_pos_adim, bl_type, bl_tag, limit,&
+            & send_min, send_max, scalar, buffer, pos_in_buffer)
+
+            use precision_tools
+            implicit none
+
+            ! Input/Output
+            integer, dimension(2), intent(in)           :: gs
+            integer, intent(in)                         :: j, k
+            integer, intent(in)                         :: ind_min
+            real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! non-dimensional particle positions
+            logical, dimension(:,:,:), intent(in)       :: bl_type      ! is the particle block a center block or a left one?
+            logical, dimension(:,:,:), intent(in)       :: bl_tag       ! indices of tagged particles
+            real(WP), dimension(:,:,:), intent(in)      :: limit        ! limitator function (divided by 8)
+            integer, dimension(:,:), intent(in)         :: send_min     ! minimal distance between the local process and the processes which send it information
+            integer, dimension(:,:), intent(in)         :: send_max     ! maximal distance between the local process and the processes which send it information
+            real(WP), dimension(:,:,:), intent(inout)   :: scalar       ! the initial scalar field transported by particles
+            real(WP),dimension(:), intent(out), target  :: buffer       ! buffer where particles are remeshed
+            integer, dimension(:), intent(inout)        :: pos_in_buffer! describes how the one-dimensional array "buffer" is split
+                                                                        ! into parts corresponding to different processes
+
+        end subroutine remesh_in_buffer_limit
+    end interface
+
+    ! --- Abstract profile of subroutine used to remesh scalar inside a buffer - variant with no type/tag ---
+    abstract interface
+        subroutine remesh_in_buffer_notype(gs, j, k, ind_min, p_pos_adim, send_min, send_max, &
+        & scalar, buffer, pos_in_buffer)
+
+            use precision_tools
+            implicit none
+
+            ! Input/Output
+            integer, dimension(2), intent(in)           :: gs
+            integer, intent(in)                         :: j, k
+            integer, intent(in)                         :: ind_min
+            real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! non-dimensional particle positions
+            integer, dimension(:,:), intent(in)         :: send_min     ! minimal distance between the local process and the processes which send it information
+            integer, dimension(:,:), intent(in)         :: send_max     ! maximal distance between the local process and the processes which send it information
+            real(WP), dimension(:,:,:), intent(inout)   :: scalar       ! the initial scalar field transported by particles
+            real(WP),dimension(:), intent(out), target  :: buffer       ! buffer where particles are remeshed
+            integer, dimension(:), intent(inout)        :: pos_in_buffer! describes how the one-dimensional array "buffer" is split
+                                                                        ! into parts corresponding to different processes
+
+        end subroutine remesh_in_buffer_notype
+    end interface
+
+    ! --- Abstract profile of subroutine used to redistribute a buffer
+    ! containing remeshing particle to a scalar field ---
+    abstract interface
+        subroutine remesh_buffer_to_scalar(gs, j, k, ind_proc, gap, begin_i1, cartography, buffer, scalar, beg_buffer)
+
+            use precision_tools
+            implicit none
+
+            ! Input/Output
+            integer, dimension(2), intent(in)           :: gs
+            integer, intent(in)                         :: j, k
+            integer, intent(in)                         :: ind_proc     ! to read the right cartography, ie the one associated with the process which sent the buffer
+            integer,intent(in)                          :: gap          ! gap between the local indices and the local indices of the sending process
+            integer, intent(in)                         :: begin_i1     ! index of the first place in the cartography
+                                                                        ! array where indices along the direction of the group of lines are stored
+            integer, dimension(:,:), intent(in)         :: cartography
+            real(WP),dimension(:), intent(in)           :: buffer       ! buffer containing the data to redistribute into the local scalar field
+            real(WP), dimension(:,:,:), intent(inout)   :: scalar       ! the scalar field
+            integer, intent(inout)                      :: beg_buffer   ! first index where the scalar values from the current sending
+                                                                        ! process are stored in the buffer (ie where to start reading)
+        end subroutine remesh_buffer_to_scalar
+    end interface
+
+end module advec_abstract_proc
diff --git a/HySoP/src/scalesReduced/particles/advec_variables.f90 b/HySoP/src/scalesReduced/particles/advec_variables.f90
new file mode 100644
index 0000000000000000000000000000000000000000..3ba18041cf3ff594d73151fc1ee8871077ac07cd
--- /dev/null
+++ b/HySoP/src/scalesReduced/particles/advec_variables.f90
@@ -0,0 +1,308 @@
+!USEFORTEST advec
+!USEFORTEST interpolation
+!> @addtogroup part
+!! @{
+!------------------------------------------------------------------------------
+!
+! MODULE: advec_variables
+!
+!
+! DESCRIPTION:
+!> The module ``advec_variables'' gathers all the variables that have to be shared by the
+!! different advection modules. It also provides a set of methods to set the protected or
+!! private variables to the right values.
+!! @details
+!! It contains the variables common to the solver along each direction and other generic variables used for the
+!! advection based on the particle method. It provides functions to set
+!! them to the right values depending on the chosen remeshing formula.
+!!
+!! This module is not supposed to be used by the main code but only by the other advection modules.
+!! More precisely, an end user should only use the generic "advec" module, which contains all the interfaces
+!! needed to initialize the solver (eg choosing the remeshing formula and the dimension splitting) and to solve
+!! the advection equation with the particle method.
+!!
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module advec_variables
+
+    use precision_tools
+
+    implicit none
+
+    ! ===== Public and protected variables =====
+    ! ----- Minimal and maximal indices of the buffers used in the different communications -----
+    !> minimal index of the send buffer
+    integer, public                             :: send_j_min
+    !> maximal index of the send buffer
+    integer, public                             :: send_j_max
+    !> To take into account diffusion inside the remeshing
+    real(WP), protected, dimension(:,:), allocatable :: sc_diff_dt_dx
+
+
+    ! ------ Solver context -----
+    ! chosen solver
+    character(len=str_short), protected         :: type_solv
+    integer, dimension(2), protected            :: remesh_stencil
+    ! ------ Remeshing information -----
+    !> number of particles in a block
+    integer, protected                          :: bl_size
+    !> distance between the "central" mesh point and the extream mesh point of the stencil of points used to remesh a particle
+    integer, protected                          :: bl_bound_size
+    !> Number of common meshes used in the remeshing of two successive particle
+    !! (in case off standart (ie non corrected) remeshing formula)).
+    integer, dimension(2), protected            :: bl_remesh_superposition
+    !> Number of block on each processus along each direction
+    integer, dimension(3), protected            :: bl_nb
+    !> Maximum CFL number allowed by communications for the current parameters
+    integer, protected                          :: CFL_max
+
+    ! ------ To ensure unique mpi message tag -----
+    ! Tags generated with a proc_gap
+    !> To create tag used in AC_particle_velocity to send range
+    integer, dimension(2), parameter            :: tag_velo_range = (/ 0,1 /)
+    !> To create tag used in AC_particle_velocity to send velocity field
+    integer, dimension(2), parameter            :: tag_velo_V = (/ 0,2 /)
+    !> To create tag used in bufferToScalar to send the range of the buffer which will be sent
+    integer, dimension(2), parameter            :: tag_bufToScal_range = (/ 0,3 /)
+    !> To create tag used in bufferToScalar to send the buffer used to remesh particles
+    integer, dimension(2), parameter            :: tag_bufToScal_buffer = (/ 0,4 /)
+
+    ! Tag generate with "compute_gap_NP"
+    !> To create tag used in AC_obtain_receivers to send ghosts
+    integer, dimension(2), parameter            :: tag_obtrec_ghost_NP = (/ 0, 1/)
+    !> To create tag used in AC_type_and_bloc to exchange ghost with neighbors
+    integer, dimension(2), parameter            :: tag_part_tag_NP = (/ 0, 2/)
+    !> To create tag used in AC_obtain_receivers to send messages about receivers of minimal and maximal rank
+    integer, dimension(2), parameter            :: tag_obtrec_NP = (/ 0, 3/)
+    !> To create tag used in AC_obtain_receivers to send message about senders of minimal and maximal rank
+    integer, dimension(2), parameter            :: tag_obtsend_NP = (/ 0, 4/)
+    !> To create tag used in advecY_limitator_group to exchange ghost with neighbors
+    integer, dimension(2), parameter            :: tag_part_slope = (/ 0, 5/)
+
+    ! ===== Public procedures =====
+    !----- Initialize solver -----
+    public                                      :: AC_solver_init
+    public                                      :: AC_set_part_bound_size
+
+contains
+
+! ====================================================================
+! ====================    Initialize context      ====================
+! ====================================================================
+
+!> Initialize some variables related to the solver implementation (which
+!! depend on the remeshing formula chosen and the dimensional splitting used).
+!!    @param[in]        part_solv   = chosen remeshing formula (space order, ...)
+!!    @param[in]        verbosity   = to display info about chosen remeshing formula (optional)
+subroutine AC_solver_init(part_solv, verbosity)
+
+    use cart_topology   ! info about mesh and mpi topology
+
+    ! Input/Output
+    character(len=*), optional, intent(in)  ::  part_solv
+    logical, optional, intent(in)  ::  verbosity
+    ! Others
+    logical :: verbose
+
+    ! Set verbosity
+    verbose = .true.
+    if (present(verbosity)) verbose = verbosity
+
+    ! Initialisation part adapted to each method
+    if (present(part_solv)) type_solv = part_solv
+    select case(type_solv)
+        case('p_O2')
+            bl_size = 2
+            bl_bound_size = 1
+            remesh_stencil = 1
+            if ((cart_rank==0).and.(verbose)) then
+                write(*,'(6x,a)') '========== Advection scheme ========='
+                write(*,'(6x,a)') ' particle method, corrected lambda 2 '
+                write(*,'(6x,a)') '====================================='
+            end if
+        case('p_O4')
+            bl_size = 4
+            bl_bound_size = 2
+            remesh_stencil = 2
+            if ((cart_rank==0).and.(verbose)) then
+                write(*,'(6x,a)') '========== Advection scheme ========='
+                write(*,'(6x,a)') ' particle method, corrected lambda 4 '
+                write(*,'(6x,a)') '====================================='
+            end if
+        case('p_M4')
+            bl_size = 1!2
+            bl_bound_size = 2   ! Be aware: don't use it to compute superposition between
+                                ! mpi processes (not as predictable as corrected schemes)
+            remesh_stencil = (/1,2/)
+            if ((cart_rank==0).and.(verbose)) then
+                write(*,'(6x,a)') '========== Advection scheme ========='
+                write(*,'(6x,a)') ' particle method,           M prime 4'
+                write(*,'(6x,a)') '====================================='
+            end if
+        case('p_M6')
+            bl_size = 1!2
+            bl_bound_size = 3   ! Be aware: don't use it to compute superposition between
+                                ! mpi processes (not as predictable as corrected schemes)
+            remesh_stencil = (/2,3/)
+            if ((cart_rank==0).and.(verbose)) then
+                write(*,'(6x,a)') '========== Advection scheme ========='
+                write(*,'(6x,a)') ' particle method,           M prime 6'
+                write(*,'(6x,a)') '====================================='
+            end if
+        case('p_M8')
+            bl_size = 2
+            bl_bound_size = 4   ! Be aware: don't use it to compute superposition between
+                                ! mpi processes (not as predictable as corrected schemes)
+            remesh_stencil = (/3,4/)
+            if ((cart_rank==0).and.(verbose)) then
+                write(*,'(6x,a)') '========== Advection scheme ========='
+                write(*,'(6x,a)') ' particle method,           M prime 8'
+                write(*,'(6x,a)') '====================================='
+            end if
+        case('d_M4')
+            bl_size = 1!2
+            bl_bound_size = 2   ! Be aware: don't use it to compute superposition between
+                                ! mpi processes (not as predictable as corrected schemes)
+            remesh_stencil = (/1,2/)
+            if ((cart_rank==0).and.(verbose)) then
+                write(*,'(6x,a)') '============= Advection scheme ==========='
+                write(*,'(6x,a)') ' particle method, M prime 4 with diffusion'
+                write(*,'(6x,a)') '=========================================='
+            end if
+        case('p_44')
+            bl_size = 1!2
+            bl_bound_size = 3   ! Be aware: don't use it to compute superposition between
+                                ! mpi processes (not as predictable as corrected schemes)
+            remesh_stencil = (/2,3/)
+            if ((cart_rank==0).and.(verbose)) then
+                write(*,'(6x,a)') '========== Advection scheme ========='
+                write(*,'(6x,a)') '     particle method, Lambda 4,4     '
+                write(*,'(6x,a)') '====================================='
+            end if
+        case('p_64')
+            bl_size = 1!2
+            bl_bound_size = 4   ! Be aware: don't use it to compute superposition between
+                                ! mpi processes (not as predictable as corrected schemes)
+            remesh_stencil = (/3,4/)
+            if ((cart_rank==0).and.(verbose)) then
+                write(*,'(6x,a)') '========== Advection scheme ========='
+                write(*,'(6x,a)') '     particle method, Lambda 6,4     '
+                write(*,'(6x,a)') '====================================='
+            end if
+        case('p_66')
+            bl_size = 1!2
+            bl_bound_size = 4   ! Be aware: don't use it to compute superposition between
+                                ! mpi processes (not as predictable as corrected schemes)
+            remesh_stencil = (/3,4/)
+            if ((cart_rank==0).and.(verbose)) then
+                write(*,'(6x,a)') '========== Advection scheme ========='
+                write(*,'(6x,a)') '     particle method, Lambda 6,6     '
+                write(*,'(6x,a)') '====================================='
+            end if
+        case('p_84')
+            bl_size = 1!2
+            bl_bound_size = 5   ! Be aware: don't use it to compute superposition between
+                                ! mpi processes (not as predictable as corrected schemes)
+            remesh_stencil = (/4,5/)
+            if ((cart_rank==0).and.(verbose)) then
+                write(*,'(6x,a)') '========== Advection scheme ========='
+                write(*,'(6x,a)') '     particle method, Lambda 8,4     '
+                write(*,'(6x,a)') '====================================='
+            end if
+        ! For backward compatibility (legacy names)
+        case('p_L4')
+            bl_size = 1!2
+            bl_bound_size = 3   ! Be aware: don't use it to compute superposition between
+                                ! mpi processes (not as predictable as corrected schemes)
+            remesh_stencil = (/2,3/)
+            if ((cart_rank==0).and.(verbose)) then
+                write(*,'(6x,a)') '========== Advection scheme ========='
+                write(*,'(6x,a)') '     particle method, Lambda 4,4     '
+                write(*,'(6x,a)') '====================================='
+            end if
+        case('p_L6')
+            bl_size = 1!2
+            bl_bound_size = 4   ! Be aware: don't use it to compute superposition between
+                                ! mpi processes (not as predictable as corrected schemes)
+            remesh_stencil = (/3,4/)
+            if ((cart_rank==0).and.(verbose)) then
+                write(*,'(6x,a)') '========== Advection scheme ========='
+                write(*,'(6x,a)') '     particle method, Lambda 6,6     '
+                write(*,'(6x,a)') '====================================='
+            end if
+        case default
+            bl_size = 2
+            bl_bound_size = 1
+            remesh_stencil = 1
+            if ((cart_rank==0).and.(verbose)) then
+                write(*,'(6x,a)') '========== Advection scheme ========='
+                write(*,'(6x,a)') ' particle method, corrected lambda 2 '
+                write(*,'(6x,a)') '====================================='
+            end if
+    end select
+
+    ! Check that the subdomain contains a number of mesh points divisible by bl_size
+    if ((modulo(mesh_sc%N_proc(1),bl_size)/=0).OR.  &
+      & (modulo(mesh_sc%N_proc(2),bl_size)/=0).OR.  &
+      & (modulo(mesh_sc%N_proc(3),bl_size)/=0)) then
+        if (cart_rank ==0) print*, 'Number of mesh points per process must be a multiple of ', bl_size
+        stop
+    end if
+
+    ! Compute local number of block along each direction
+    bl_nb = mesh_sc%N_proc/bl_size
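+    ! (eg with mesh_sc%N_proc = (/64,64,64/) and bl_size = 4, one gets bl_nb = (/16,16,16/))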
+
+    ! Compute maximal CFL number
+    CFL_max = minval(mesh_sc%N_proc)*(size(neighbors)/2)
+
+    ! To take into account diffusion during the remeshing operation
+    if(.not. allocated(sc_diff_dt_dx)) allocate(sc_diff_dt_dx(1,3))
+
+end subroutine AC_solver_init
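+
+! Illustrative usage (sketch, not part of the library): initialize the solver
+! context for the corrected lambda 4 formula, with messages on rank 0:
+!
+!   call AC_solver_init('p_O4', verbosity=.true.)
+!
+! after which bl_size == 4, bl_bound_size == 2 and remesh_stencil == (/2,2/).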
+
+!> Manually change the protected variable "bl_bound_size" - for test purposes only
+!! (auto-validation tests)
+!!    @param[in]        bound_size   = wanted value of "bl_bound_size"
+subroutine AC_set_part_bound_size(bound_size)
+
+    ! Input/Output
+    integer, intent(in) :: bound_size
+
+    bl_bound_size = bound_size
+
+end subroutine AC_set_part_bound_size
+
+!> Manually set the diffusion parameter used to take diffusion into account
+!! directly in the remeshing.
+subroutine AC_set_diff_dt_dx(sc_diff)
+
+  use cart_topology
+
+  ! Input/Output
+  real(WP), dimension(:), intent(in)  ::  sc_diff
+  ! Local
+  integer                             :: ind
+! character(len=10)                 :: format_out
+
+  if (size(sc_diff_dt_dx,1) /= size(sc_diff)) then
+    deallocate(sc_diff_dt_dx)
+    allocate(sc_diff_dt_dx(size(sc_diff),3))
+  end if
+  do ind =1, 3
+    sc_diff_dt_dx(:,ind) = sc_diff/(mesh_sc%dx(ind)**2)
+  end do
+
+! if(cart_rank==0) then
+!   write(format_out,'(a,i0,a)') '(a,', size(sc_diff_dt_dx,1), 'g15.8)'
+!   write(*,format_out) 'diff along X = ', sc_diff_dt_dx(:,1)
+!   write(*,format_out) 'diff along Y = ', sc_diff_dt_dx(:,2)
+!   write(*,format_out) 'diff along Z = ', sc_diff_dt_dx(:,3)
+! end if
+
+end subroutine AC_set_diff_dt_dx
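+
+! Illustrative usage (sketch): for two scalars with diffusion parameters d1 and d2
+! (hypothetical values, presumably already multiplied by the time step, as the
+! name sc_diff_dt_dx suggests):
+!
+!   call AC_set_diff_dt_dx((/d1, d2/))
+!
+! after which sc_diff_dt_dx(i,dir) holds d_i/mesh_sc%dx(dir)**2 for each direction.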
+
+
+end module advec_variables
diff --git a/HySoP/src/scalesReduced/particles/interpolation_velo.f90 b/HySoP/src/scalesReduced/particles/interpolation_velo.f90
new file mode 100644
index 0000000000000000000000000000000000000000..d63bc148dfd3f8be61e63a5639d12884b4fd8610
--- /dev/null
+++ b/HySoP/src/scalesReduced/particles/interpolation_velo.f90
@@ -0,0 +1,1037 @@
+!USEFORTEST interpolation
+!USEFORTEST advec
+!> @addtogroup part
+!! @{
+!------------------------------------------------------------------------------
+!
+! MODULE: Interpolation_velo
+!
+!
+! DESCRIPTION:
+!> The module ``Interpolation_velo'' gathers the functions and subroutines used to interpolate
+!! the velocity at particle positions, which are not specific to a direction.
+!! @details
+!! This module gathers functions and routines used to advect a scalar which are not
+!! specific to a direction. This is a parallel implementation using MPI and
+!! the Cartesian topology it provides. It also contains the variables common to
+!! the solver along each direction and other generic variables used for the
+!! advection based on the particle method.
+!!
+!! Except for testing purposes, this module is not supposed to be used by the
+!! main code but only by the other advection modules. More precisely, an end user
+!! must only use the generic "advec" module, which contains all the interfaces to
+!! solve the advection equation with the particle method and to choose the
+!! remeshing formula, the dimensional splitting and everything else. Except for
+!! testing purposes, the other advection modules only have to include
+!! "advec_common".
+!!
+!! The module "test_advec" can be used in order to validate the procedures
+!! embedded in this module.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module Interpolation_velo
+
+    use precision_tools
+    use cart_topology
+    use mpi, only: MPI_STATUS_SIZE, MPI_STATUSES_IGNORE
+    implicit none
+
+    public
+
+
+    ! ===== Public procedures =====
+    !----- To interpolate velocity -----
+
+    ! ===== Interpolation formula =====
+    ! For newer versions of GCC (>= 4.7), these subroutines could be replaced by functions
+    ! which return the weight array with the position as input argument. This works
+    ! with gcc 4.7 and later, with the IBM compiler, and with Intel too.
+    !> Generic subroutine (pointer initialized to the chosen formula)
+    procedure(weight_M4), pointer,  public :: get_weight => null()
+    !> Specific interpolation formula
+    public :: weight_M4, weight_Mprime4, weight_Lambda4_4, weight_linear
+
+    ! ===== Private variables =====
+    character(len=4), protected :: interpol = 'Mp4'
+    integer, protected :: stencil_size = 4  ! total number of points in the interpolation stencil
+    integer, protected :: stencil_g = 1     ! stencil points on the left of the interpolated position
+    integer, protected :: stencil_d = 2     ! stencil points on the right of the interpolated position
+
+
+contains
+
+! ===== Public procedure =====
+
+! ============================================================
+! ====================     Initialisation ====================
+! ============================================================
+!> To choose interpolation formula
+subroutine interpol_init(formula, verbose)
+
+    character(len=*), optional, intent(in)  ::  formula
+    logical, optional, intent(in)           ::  verbose
+
+    logical :: verbosity
+
+    if(present(formula)) then
+      interpol = formula
+    else
+      interpol = 'Mp4'
+    end if
+
+    verbosity = .false.
+    if(present(verbose)) verbosity = verbose
+
+    select case(trim(interpol))
+    case('lin')
+      stencil_size = 2
+      stencil_d = 1
+      stencil_g = 0
+      get_weight => weight_linear
+      if ((cart_rank==0).and.(verbosity)) &
+        & write(*,'(6x,a)') '============= Interpolation = linear  ==========='
+    case('L4_4')
+      stencil_size = 6
+      stencil_d = 3
+      stencil_g = 2
+      get_weight => weight_Lambda4_4
+      if ((cart_rank==0).and.(verbosity)) &
+        & write(*,'(6x,a)') '============= Interpolation = Lambda 4,4 ==========='
+    case('M4')
+      stencil_size = 4
+      stencil_d = 2
+      stencil_g = 1
+      get_weight => weight_M4
+      if ((cart_rank==0).and.(verbosity)) &
+        & write(*,'(6x,a)') '============= Interpolation = M 4 ==========='
+    case default
+      stencil_size = 4
+      stencil_d = 2
+      stencil_g = 1
+      get_weight => weight_Mprime4
+      if ((cart_rank==0).and.(verbosity)) &
+        & write(*,'(6x,a)') '============= Interpolation = Mprime 4 ==========='
+    end select
+
+end subroutine interpol_init
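+
+! Illustrative usage (sketch): select the Lambda 4,4 kernel before any interpolation:
+!
+!   call interpol_init('L4_4', verbose=.true.)
+!
+! after which get_weight points to weight_Lambda4_4 and the stencil uses 6 points
+! (stencil_g = 2 on the left, stencil_d = 3 on the right of the interpolated position).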
+
+! ==========================================================================================
+! ====================     Interpolation of each velocity component     ====================
+! ==========================================================================================
+! Except for test purposes, only these bricks must be called from outside this module.
+
+! For advection solver
+subroutine Interpol_2D_3D_vect(dx_f, dx_c, Vx, Vy, Vz, Vx_c, Vx_f, Vy_c, Vy_f, Vz_c, Vz_f)
+
+  real(WP), dimension(3), intent(in)        :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: Vx, Vy, Vz
+  real(WP), dimension(:,:,:),intent(inout)  :: Vx_c, Vy_c, Vz_c
+  real(WP), dimension(:,:,:),intent(inout)  :: Vx_f, Vy_f, Vz_f
+
+  call Interpol_2D_vect(dx_f, dx_c, Vx, Vy, Vz, Vx_c, Vy_c, Vz_c)
+
+  call Inter_FirstDir_no_com(Vx_c, dx_c(1), Vx_f, dx_f(1))
+
+  call Inter_FirstDir_com(2, Vy_c, dx_c(2), Vy_f, dx_f(2))
+  call Inter_FirstDir_com(3, Vz_c, dx_c(3), Vz_f, dx_f(3))
+
+end subroutine Interpol_2D_3D_vect
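+
+! Note on the communication pattern above (a reading of the code, stated as an
+! assumption): refining Vx along X needs no communication, while Vy and Vz are
+! refined along their own direction with Inter_FirstDir_com. This is consistent
+! with a domain decomposition where the MPI Cartesian topology only splits the
+! Y and Z directions, so that X lines are entirely local to each process.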
+
+
+
+!> Interpolate each component of a vector along its two transverse directions.
+!!    @param[in]        dx_c        = space steps on the coarse grid (all three directions)
+!!    @param[in]        dx_f        = space steps on the fine grid (all three directions)
+!!    @param[in]        Vx          = vector component along X
+!!    @param[in]        Vy          = vector component along Y
+!!    @param[in]        Vz          = vector component along Z
+!!    @param[out]       InterX      = interpolation of Vx along Y and Z
+!!    @param[out]       InterY      = interpolation of Vy along X and Z
+!!    @param[out]       InterZ      = interpolation of Vz along X and Y
+subroutine Interpol_2D_vect(dx_f, dx_c, Vx, Vy, Vz, InterX, InterY, InterZ)
+
+  real(WP), dimension(3), intent(in)        :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: Vx, Vy, Vz
+  real(WP), dimension(:,:,:),intent(inout)  :: InterX, InterY, InterZ
+  ! Local variable
+  real(WP), dimension(2)                    :: d_f, d_c
+
+  ! For Vx, interpolation along Y and Z
+  call Inter_YZ(Vx, dx_c(2:3), InterX, dx_f(2:3))
+  ! For Vy, interpolation along Z (with communications) then along X (no communication required)
+  d_c = (/dx_c(1), dx_c(3)/)
+  d_f = (/dx_f(1), dx_f(3)/)
+  call Inter_XZ_permut(Vy, d_c, InterY, d_f)
+  ! For Vz, interpolation along Y (with communications) then along X (no communication required)
+  call Inter_XY_permut(Vz, d_c(1:2), InterZ, d_f(1:2))
+
+end subroutine Interpol_2D_vect
+
+!> 3D interpolation of a field to a finer grid - no transpositions.
+!!    @param[in]        V_coarse    = velocity to interpolate
+!!    @param[in]        dx_c        = space steps on the coarse grid (all three directions)
+!!    @param[in,out]    V_fine      = interpolated velocity
+!!    @param[in]        dx_f        = space steps on the fine grid (all three directions)
+subroutine Interpol_3D(V_coarse, dx_c, V_fine, dx_f)
+
+  real(WP), dimension(3), intent(in)        :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: V_coarse
+  real(WP), dimension(:,:,:),intent(inout)  :: V_fine
+  ! Local variable
+  real(WP), dimension(size(V_fine,1),size(V_coarse,2),size(V_coarse,3))    :: V_middle ! to save result of interpolation along X
+
+  ! Interpolate along X
+  call Inter_FirstDir_no_com(V_coarse, dx_c(1), V_middle, dx_f(1))
+
+  ! And then along Y and Z
+  call Inter_YZ(V_middle, dx_c(2:3), V_fine, dx_f(2:3))
+
+end subroutine Interpol_3D
+
+! ========================================================================
+! ====================        2D interpolation        ====================
+! ========================================================================
+
+!> Interpolate a field (stored along X,Y,Z) along the X and Y axes
+!!    @param[in]        V_coarse    = velocity to interpolate along the X and Y directions
+!!    @param[in]        dx_c        = space steps on the coarse grid (for first and second directions)
+!!    @param[in,out]    V_fine      = interpolated velocity
+!!    @param[in]        dx_f        = space steps on the fine grid (for first and second directions)
+!! @details
+!!   V_fine and V_coarse must have the same resolution along the third
+!! direction.
+subroutine Inter_XY(V_coarse, dx_c, V_fine, dx_f)
+
+  ! Input/Output
+  real(WP), dimension(2), intent(in)        :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: V_coarse
+  real(WP), dimension(:,:,:),intent(inout)  :: V_fine
+  ! Local
+  real(WP), dimension(size(V_coarse,1),size(V_coarse,3),size(V_coarse,2))  :: V_permut ! permutation required for the first interpolation
+  real(WP), dimension(size(V_coarse,1),size(V_fine,2),size(V_coarse,3))    :: V_middle ! to save the result of the interpolation along Y + permutation
+  integer :: ind  ! loop index
+
+  ! Check field sizes
+  if(.not.(size(V_fine,3)==size(V_coarse,3))) then
+    write(*,'(a)') '[ERROR] Interpolation along XY : V_coarse and V_fine do not have the same resolution along the Z axis'
+    stop
+  end if
+
+  ! Permutation to prepare first interpolation
+  do ind = 1, size(V_coarse,3)
+    V_permut(:,ind,:) = V_coarse(:,:,ind)
+  end do
+
+  ! Interpolation along last direction = Y-direction + permutation to re-order indices
+  call Inter_LastDir_Permut_com(2, V_permut, dx_c(2), V_middle, dx_f(2))
+
+  ! Interpolation along X = first direction
+  call Inter_FirstDir_no_com(V_middle, dx_c(1), V_fine, dx_f(1))
+
+end subroutine Inter_XY
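+
+! Shape bookkeeping for Inter_XY above (nx, ny, nz are illustrative local sizes;
+! _c = coarse, _f = fine):
+!   V_coarse(nx_c, ny_c, nz)  --permute-->      V_permut(nx_c, nz, ny_c)
+!   --Inter_LastDir_Permut_com(dir=2)-->        V_middle(nx_c, ny_f, nz)
+!   --Inter_FirstDir_no_com-->                  V_fine(nx_f, ny_f, nz)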
+
+
+!> Interpolate a field (stored along X,Y,Z) along the X and Y axes + permutation
+!! in order to get a field stored along (Z,X,Y)
+!!    @param[in]        V_coarse    = velocity to interpolate along the X and Y directions
+!!    @param[in]        dx_c        = space steps on the coarse grid (for first and second directions)
+!!    @param[in,out]    V_fine      = interpolated velocity
+!!    @param[in]        dx_f        = space steps on the fine grid (for first and second directions)
+!! @details
+!!   V_fine and V_coarse must have the same resolution along the third
+!! direction.
+subroutine Inter_XY_permut(V_coarse, dx_c, V_fine, dx_f)
+
+  ! Input/Output
+  real(WP), dimension(2), intent(in)        :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: V_coarse
+  real(WP), dimension(:,:,:),intent(inout)  :: V_fine
+  ! Local
+  real(WP), dimension(size(V_coarse,1),size(V_coarse,3),size(V_coarse,2))  :: V_permut ! permutation required for the first interpolation
+  real(WP), dimension(size(V_coarse,1),size(V_coarse,3),size(V_fine,3))    :: V_middle ! to save the result of the interpolation along Y
+  integer :: ind  ! loop index
+
+  ! Check field sizes
+  if(.not.(size(V_fine,1)==size(V_coarse,3))) then
+    write(*,'(a)') '[ERROR] Interpolation along XY_permut : V_coarse and V_fine do not have the same resolution along the Z axis'
+    stop
+  end if
+
+  ! Permutation to prepare first interpolation
+  do ind = 1, size(V_coarse,3)
+    V_permut(:,ind,:) = V_coarse(:,:,ind)
+  end do
+
+  ! Interpolation along last direction = Y-direction
+  call Inter_LastDir_com(2, V_permut, dx_c(2), V_middle, dx_f(2))
+
+  ! Interpolation along X = first direction  + permutation to re-order indices
+  call Inter_FirstDir_Permut_no_com(V_middle, dx_c(1), V_fine, dx_f(1))
+
+end subroutine Inter_XY_permut
+
+
+!> Interpolate a field (stored along X,Y,Z) along the Y and Z axes
+!!    @param[in]        V_coarse    = velocity to interpolate along the Y and Z directions
+!!    @param[in]        dx_c        = space steps on the coarse grid (for second and last directions)
+!!    @param[in,out]    V_fine      = interpolated velocity
+!!    @param[in]        dx_f        = space steps on the fine grid (for second and last directions)
+!! @details
+!!   V_fine and V_coarse must have the same resolution along the first direction.
+subroutine Inter_YZ(V_coarse, dx_c, V_fine, dx_f)
+
+  ! Input/Output
+  real(WP), dimension(2), intent(in)        :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: V_coarse
+  real(WP), dimension(:,:,:),intent(inout)  :: V_fine
+  ! Local
+  real(WP), dimension(size(V_coarse,1),size(V_fine,3),size(V_coarse,2))    :: V_middle ! to save result of interpolation along Z + permutation
+
+
+  ! Check that the arrays have the right size
+  if(.not.(size(V_fine,1)==size(V_coarse,1))) then
+    write(*,'(a)') '[ERROR] Interpolation along YZ : V_coarse and V_fine do not have the same resolution along the first direction'
+    stop
+  end if
+
+  ! Interpolation along Z + permutation between Y and Z
+  call Inter_LastDir_Permut_com(3, V_coarse, dx_c(2), V_middle, dx_f(2))
+
+  ! Interpolation along Y (= third direction thanks to the previous permutation) + permutation between Y and Z
+  call Inter_LastDir_Permut_com(2, V_middle, dx_c(1), V_fine, dx_f(1))
+
+end subroutine Inter_YZ
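+
+! Shape bookkeeping for Inter_YZ above (illustrative local sizes; _c = coarse, _f = fine):
+!   V_coarse(nx, ny_c, nz_c)  --Inter_LastDir_Permut_com(dir=3)-->  V_middle(nx, nz_f, ny_c)
+!   --Inter_LastDir_Permut_com(dir=2)-->                            V_fine(nx, ny_f, nz_f)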
+
+
+!> Interpolate a field (stored along X,Y,Z) along the X and Z axes
+!!    @param[in]        V_coarse    = velocity to interpolate along the X and Z directions
+!!    @param[in]        dx_c        = space steps on the coarse grid (for first and last directions)
+!!    @param[in,out]    V_fine      = interpolated velocity
+!!    @param[in]        dx_f        = space steps on the fine grid (for first and last directions)
+!! @details
+!!   V_fine and V_coarse must have the same resolution along the second direction.
+subroutine Inter_XZ(V_coarse, dx_c, V_fine, dx_f)
+
+  ! Input/Output
+  real(WP), dimension(2), intent(in)        :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: V_coarse
+  real(WP), dimension(:,:,:),intent(inout)  :: V_fine
+  ! Local
+  real(WP), dimension(size(V_coarse,1),size(V_coarse,2),size(V_fine,3))    :: V_middle ! to save the result of the interpolation along Z
+
+
+  ! Check that the arrays have the right size
+  if(.not.(size(V_fine,2)==size(V_coarse,2))) then
+    write(*,'(a)') '[ERROR] Interpolation along XZ : V_coarse and V_fine do not have the same resolution along the second direction'
+    stop
+  end if
+
+  ! Interpolation along Z
+  call Inter_LastDir_com(3, V_coarse, dx_c(2), V_middle, dx_f(2))
+
+  ! Interpolation along X
+  call Inter_FirstDir_no_com(V_middle, dx_c(1), V_fine, dx_f(1))
+
+end subroutine Inter_XZ
+
+
+!> Interpolate a field (stored along X,Y,Z) along the X and Z axes and get a
+!! field stored as a function of (Y,X,Z)
+!!    @param[in]        V_coarse    = velocity to interpolate along the X and Z directions
+!!    @param[in]        dx_c        = space steps on the coarse grid (for first and last directions)
+!!    @param[in,out]    V_fine      = interpolated velocity
+!!    @param[in]        dx_f        = space steps on the fine grid (for first and last directions)
+!! @details
+!!   V_fine and V_coarse must have the same resolution along the Y axis.
+subroutine Inter_XZ_permut(V_coarse, dx_c, V_fine, dx_f)
+
+  ! Input/Output
+  real(WP), dimension(2), intent(in)        :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: V_coarse
+  real(WP), dimension(:,:,:),intent(inout)  :: V_fine
+  ! Local
+  real(WP), dimension(size(V_coarse,1),size(V_coarse,2),size(V_fine,3))    :: V_middle ! to save the result of the interpolation along Z
+
+
+  ! Check that the arrays have the right size
+  if(.not.(size(V_fine,1)==size(V_coarse,2))) then
+    write(*,'(a)') '[ERROR] Interpolation along XZ_permut : V_coarse and V_fine do not have matching resolutions along the Y direction'
+    stop
+  end if
+
+  ! Interpolation along Z
+  call Inter_LastDir_com(3, V_coarse, dx_c(2), V_middle, dx_f(2))
+
+  ! Interpolation along X
+  call Inter_FirstDir_Permut_no_com(V_middle, dx_c(1), V_fine, dx_f(1))
+
+end subroutine Inter_XZ_permut
+
+! =================================================================================
+! ====================   Elementary brick = 1D interpolation   ====================
+! =================================================================================
+! Do not use these directly, except for test purposes. If you want to use them,
+! check the input sizes (not checked here, because they are already checked in the
+! functions which call them, when needed)
+
+!> Interpolate a field along the last direction - with communication : V_fine(i,j,k) = interpolation(V_coarse(i,j,k_interpolation))
+!!    @param[in]        dir         = last direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        V_coarse    = velocity to interpolate along the last direction
+!!    @param[in]        dx_c        = space step on the coarse grid (for last direction)
+!!    @param[in,out]    V_fine      = interpolated velocity
+!!    @param[in]        dx_f        = space step on the fine grid (for last direction)
+!! @details
+!!   V_fine and V_coarse must have the same resolution along the second and third
+!! directions.
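+!!
+!!   Algorithm: nonblocking receives fill the ghost layers V_beg (the stencil_g
+!! coarse points before the local block) and V_end (the stencil_d points after it),
+!! possibly through several messages when the local coarse grid is smaller than the
+!! stencil. Fine points whose stencil is fully local are interpolated while these
+!! messages are in flight; after mpi_waitall, the beginning and the end of each line
+!! are finished using the ghost values.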
+subroutine Inter_LastDir_com(dir, V_coarse, dx_c, V_fine, dx_f)
+
+  ! Input/Output
+  integer, intent(in)                       :: dir
+  real(WP), intent(in)                      :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: V_coarse
+  real(WP), dimension(:,:,:),intent(inout)  :: V_fine
+  ! Local variable
+  real(WP), dimension(:,:,:), allocatable   :: V_beg, V_end ! ghost values of velocity
+  real(WP), dimension(stencil_size)         :: weight       ! interpolation weight
+  integer               :: i,ind,i_bis, V_ind   ! some loop indices
+  integer               :: ind_max, ind_min, ind_limit, ind_limit_2
+  real(WP)              :: pos
+  integer               :: N_coarse, N_fine     ! number of grid points
+  integer               :: com_pos              ! to deal with multiple communications - if (stencil size)/2 > local size of coarse grid
+                                                ! = position where ghost values are received in V_beg and V_end
+  integer, dimension(2) :: com_nb               ! number of communications (if (stencil size)/2 > local size of coarse grid)
+  integer, dimension(2) :: com_size             ! size of mpi communication for ghost points
+  integer               :: ierr                 ! mpi error code
+  integer, dimension(:),allocatable   :: beg_request  ! mpi communication request (handle) of nonblocking receive
+  integer, dimension(:),allocatable   :: end_request  ! mpi communication request (handle) of nonblocking receive
+  real(WP), parameter :: eps = 1e-6_WP
+
+  ! Initialisation
+  com_size(1) = size(V_coarse,1)*size(V_coarse,2)
+  N_coarse = size(V_coarse,3)
+  N_fine = size(V_fine,3)
+  ! ind_max = max{index ind on the fine grid such that V_fine(ind) can be computed without communication}
+  !         = max{ind : V_ind=floor[(ind-1)*dx_f/dx_c]+1  <=  (N_coarse-stencil_d)            }
+  !         = max{ind : V_ind=floor[(ind-1)*dx_f/dx_c]    <=  (N_coarse-stencil_d-1)          }
+  !         = max{ind : pos  =      (ind-1)*dx_f/dx_c     <   (N_coarse-stencil_d-1)+1        }
+  !         = max{ind :             (ind-1)               <   (N_coarse-stencil_d)*dx_c/dx_f  }
+  !         = max{ind : ind < [(N_coarse-stencil_d)*(dx_c/dx_f)]+1}
+  ! Define real_max = (N_coarse-stencil_d)*(dx_c/dx_f) as a real. One gets:
+  ! ind_max = max{integer ind : ind < real_max+1} and thus
+  ! ind_max = real_max            if real_max is an integer
+  !           floor(real_max+1)   otherwise
+  ! ie ind_max = ceiling(real_max)
+  !ind_max = ceiling((N_coarse-stencil_d)*dx_c/dx_f)
+  ind_max = ceiling(((N_coarse-stencil_d)*dx_c/dx_f)-eps)  ! the eps avoids round-off errors and thus a segmentation fault
+  ! ind_min = min{index ind on the fine grid such that V_fine(ind) can be computed without communication}
+  !         = min{ind : V_ind=floor[(ind-1)*dx_f/dx_c]+1 - stencil_g > 0}
+  !         = min{ind : V_ind=floor[(ind-1)*dx_f/dx_c] >  stencil_g -1  }
+  !         = min{ind : V_ind=floor[(ind-1)*dx_f/dx_c] >= stencil_g     }
+  !         = min{ind :             (ind-1)*dx_f/dx_c  >= stencil_g     }
+  !         = min{ind : pos=        (ind-1)*dx_f       >= stencil_g*dx_c}
+  !         = min{ind : ind >= (stencil_g*dx_c/dx_f) + 1}
+  !         = ceiling[(stencil_g*dx_c/dx_f) + 1]
+  ind_min = ceiling(stencil_g*dx_c/dx_f)+1 ! here numerical truncation cannot lead to a seg. fault
+
+  ! ==== Communication ====
+  if(stencil_g>0) then
+    allocate(V_beg(size(V_coarse,1),size(V_coarse,2),stencil_g))
+    com_nb(1) = ceiling(real(stencil_g, WP)/N_coarse) ! number of communications required to get the ghosts
+    allocate(beg_request(com_nb(1)))
+    com_pos = stencil_g+1              ! i = 1 + missing (or remaining) ghost lines
+    com_size(2) = com_size(1)*N_coarse
+    ! Except for the last communication, send all the local coarse data.
+    ! Note that this happens only if the local coarse grid contains less than (stencil_size/2)
+    ! points along the current direction (ie if the coarse grid is very coarse)
+    do ind = 1, com_nb(1)-1
+      com_pos = com_pos - N_coarse  ! = 1 + missing ghost lines after this step
+      ! Communication
+      call Mpi_Irecv(V_beg(1,1,com_pos),com_size(2),MPI_REAL_WP, &
+        & neighbors(dir,-ind), 100+ind, D_comm(dir), beg_request(ind), ierr)
+      call Mpi_Send(V_coarse(1,1,1),com_size(2),MPI_REAL_WP, &
+        & neighbors(dir,ind), 100+ind, D_comm(dir), ierr)
+    end do
+    ! Last communication to complete the ghost at the beginning of the lines
+    ! We use the fact that missing ghost lines = com_pos - 1
+    com_size(2) = com_size(1)*(com_pos-1)
+    call Mpi_Irecv(V_beg(1,1,1),com_size(2),MPI_REAL_WP, &
+      & neighbors(dir,-com_nb(1)), 1, D_comm(dir), beg_request(com_nb(1)), ierr)
+    call Mpi_Send(V_coarse(1,1,N_coarse-com_pos+2),com_size(2),MPI_REAL_WP, &
+      & neighbors(dir,com_nb(1)), 1, D_comm(dir), ierr)
+  end if
+
+  if(stencil_d>0) then
+    allocate(V_end(size(V_coarse,1),size(V_coarse,2),stencil_d))
+    com_nb(2) = ceiling(real(stencil_d, WP)/N_coarse) ! number of communications required to get the ghosts
+    allocate(end_request(com_nb(2)))
+    com_pos = 1   ! Reception from the next process is done at position 1
+    com_size(2) = com_size(1)*N_coarse
+    ! Except for the last communication, send all the local coarse data.
+    ! Note that this happens only if the local coarse grid contains less than (stencil_size/2)
+    ! points along the current direction (ie if the coarse grid is very coarse)
+    do ind = 1, com_nb(2)-1
+      ! Communication
+      call Mpi_Irecv(V_end(1,1,com_pos),com_size(2),MPI_REAL_WP, &
+        & neighbors(dir,ind), 200+ind, D_comm(dir), end_request(ind), ierr)
+      call Mpi_Send(V_coarse(1,1,1),com_size(2),MPI_REAL_WP, &
+        & neighbors(dir,-ind), 200+ind, D_comm(dir), ierr)
+      ! next com_pos = (ind*N_coarse)+1 = com_pos + N_coarse
+      com_pos = com_pos + N_coarse
+    end do
+    ! Last step
+    ! Note that: missing ghost lines = stencil_d - (com_nb-1)*N_coarse
+    com_size(2) = com_size(1)*(stencil_d-((com_nb(2)-1)*N_coarse))
+    ! Perform communication
+    call Mpi_Irecv(V_end(1,1,com_pos),com_size(2),MPI_REAL_WP, &
+      & neighbors(dir,com_nb(2)), 2, D_comm(dir), end_request(com_nb(2)), ierr)
+    ! Send data
+    call Mpi_Send(V_coarse(1,1,1),com_size(2),MPI_REAL_WP, &
+      & neighbors(dir,-com_nb(2)), 2, D_comm(dir), ierr)
+  end if
+
+  ! ==== Interpolation ====
+  ! -- For middle points --
+  do ind = ind_min, ind_max
+    pos = (ind-1)*dx_f/dx_c
+    V_ind = floor(pos)+1
+    call get_weight(pos-V_ind+1, weight)
+    V_ind = V_ind - stencil_g
+    V_fine(:,:,ind) = weight(1)*V_coarse(:,:,V_ind)
+    do i = 1, (stencil_size - 1)
+      V_fine(:,:,ind) = V_fine(:,:,ind) + weight(i+1)*V_coarse(:,:,V_ind+i)
+    end do
+  end do
+  ! -- Wait for communication completion before dealing with the end --
+  if(stencil_g>0) then
+    call mpi_waitall(com_nb(1),beg_request, MPI_STATUSES_IGNORE, ierr)
+    deallocate(beg_request)
+  end if
+  if(stencil_d>0) then
+    call mpi_waitall(com_nb(2),end_request, MPI_STATUSES_IGNORE, ierr)
+    deallocate(end_request)
+  end if
+  ! -- For the beginning --
+  ! Use the fact that the interpolation formulas are exact - no computation for the first point of each line
+  V_fine(:,:,1) = V_coarse(:,:,1)
+  ! For the other first points
+  do ind = 2, min(ind_min-1, N_fine)  ! Be careful: in some massively parallel contexts, ind_min can be bigger than N_fine+1
+    pos = (ind-1)*dx_f/dx_c
+    V_ind = floor(pos)+1
+    call get_weight(pos-V_ind+1, weight)
+    !V_ind = V_ind - stencil_g
+    !V_fine(:,:,ind) = weight(1)*V_beg(:,:,V_ind+stencil_g) ! Array start from 1
+    V_fine(:,:,ind) = weight(1)*V_beg(:,:,V_ind) ! Array start from 1
+    ind_limit = stencil_g - V_ind + 1
+    do i = 2, ind_limit
+      !V_fine(:,:,ind) = V_fine(:,:,ind) + weight(i)*V_beg(:,:,V_ind+stencil_g+i)
+      V_fine(:,:,ind) = V_fine(:,:,ind) + weight(i)*V_beg(:,:,V_ind-1+i) ! first point in V_beg stands for 1-stencil_g position
+    end do
+    ! If N_coarse < stencil_size, the last interpolation points do not belong to the local
+    ! domain but to the domain of the process with coordinates = (mine+1) inside the MPI
+    ! topology. Then we search V_end for the values at the last interpolation points.
+    ind_limit_2 = min(stencil_size, N_coarse+ind_limit) ! for very coarse grid, stencil size could be bigger than N_coarse
+    do i_bis = ind_limit+1, ind_limit_2
+      ! We look for first local value of V_coarse at position (:,:,1) ! (array starts at 1)
+      V_fine(:,:,ind) = V_fine(:,:,ind) + weight(i_bis)*V_coarse(:,:,i_bis-ind_limit)
+    end do
+    ! Values in V_end
+    do i_bis = ind_limit_2+1, stencil_size
+      V_fine(:,:,ind) = V_fine(:,:,ind) + weight(i_bis)*V_end(:,:,i_bis-ind_limit_2)
+    end do
+  end do
+  ! -- For points at the end of a line along the current direction --
+  ! If ind_max <= ind_min (ie if stencil_size > N_coarse), the computation is already done for the first points
+  do ind = max(ind_max+1,ind_min), N_fine
+    pos = (ind-1)*dx_f/dx_c
+    V_ind = floor(pos)+1
+    call get_weight(pos-V_ind+1, weight)
+    !V_ind = V_ind - stencil_g
+    V_ind = V_ind - stencil_g-1
+    V_fine(:,:,ind) = weight(1)*V_coarse(:,:,V_ind+1)
+    ind_limit = min((stencil_size),N_coarse-V_ind)
+    do i = 2, ind_limit
+      V_fine(:,:,ind) = V_fine(:,:,ind) + weight(i)*V_coarse(:,:,V_ind+i)
+    end do
+    V_ind = V_ind - N_coarse
+    do i_bis = ind_limit+1, stencil_size
+      V_fine(:,:,ind) = V_fine(:,:,ind) + weight(i_bis)*V_end(:,:,i_bis+V_ind)
+    end do
+  end do
+
+  ! Free memory
+  if(stencil_d>0) deallocate(V_end)
+  if(stencil_g>0) deallocate(V_beg)
+
+end subroutine Inter_LastDir_com
+
+
+!> Interpolate a field along the last direction and permute the second and third directions - with communication : V_fine(i,j,k) = interpolation(V_coarse(i,j,k_interpolation))
+!!    @param[in]        dir         = last direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        V_coarse    = velocity to interpolate along the last direction
+!!    @param[in]        dx_c        = space step on the coarse grid (for last direction)
+!!    @param[in,out]    V_fine      = interpolated velocity
+!!    @param[in]        dx_f        = space step on the fine grid (for last direction)
+!! @details
+!!   V_fine and V_coarse must have the same resolution along the second and third
+!! directions.
+subroutine Inter_LastDir_Permut_com(dir, V_coarse, dx_c, V_fine, dx_f)
+
+  ! Input/Output
+  integer, intent(in)                       :: dir
+  real(WP), intent(in)                      :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: V_coarse
+  real(WP), dimension(:,:,:),intent(inout)  :: V_fine
+  ! Local variable
+  real(WP), dimension(:,:,:), allocatable   :: V_beg, V_end ! ghost values of velocity
+  real(WP), dimension(stencil_size)         :: weight       ! interpolation weight
+  integer               :: i,ind,i_bis, V_ind   ! some loop indices
+  integer               :: ind_max, ind_min, ind_limit, ind_limit_2
+  real(WP)              :: pos
+  integer               :: N_coarse, N_fine     ! number of grid points
+  integer               :: com_pos              ! to deal with multiple communications - if (stencil size)/2 > local size of coarse grid
+                                                ! = position where ghost values are received in V_beg and V_end
+  integer, dimension(2) :: com_nb               ! number of communications (if (stencil size)/2 > local size of coarse grid)
+  integer, dimension(2) :: com_size             ! size of mpi communication for ghost points
+  integer               :: ierr                 ! mpi error code
+  integer, dimension(:),allocatable   :: beg_request  ! mpi communication request (handle) of nonblocking receive
+  integer, dimension(:),allocatable   :: end_request  ! mpi communication request (handle) of nonblocking receive
+
+  ! Initialisation
+  com_size(1) = size(V_coarse,1)*size(V_coarse,2)
+  N_coarse = size(V_coarse,3)
+  N_fine = size(V_fine,2)
+  ind_max = ceiling(((N_coarse-stencil_d)*dx_c/dx_f)-1e-6_WP)  ! the 1e-6 avoids round-off errors and thus a segmentation fault
+  ind_min = ceiling((stencil_g)*dx_c/dx_f)+1
+
+  ! ==== Communication ====
+  if(stencil_g>0) then
+    allocate(V_beg(size(V_coarse,1),size(V_coarse,2),stencil_g))
+    com_nb(1) = ceiling(real(stencil_g, WP)/N_coarse) ! number of communications required to get the ghosts
+    allocate(beg_request(com_nb(1)))
+    com_pos = stencil_g+1              ! i = 1 + missing (or remaining) ghost lines
+    com_size(2) = com_size(1)*N_coarse
+    ! Except for the last communication, send all the local coarse data.
+    ! Note that this happens only if the local coarse grid contains less than (stencil_size/2)
+    ! points along the current direction (ie if the coarse grid is very coarse)
+    do ind = 1, com_nb(1)-1
+      com_pos = com_pos - N_coarse  ! = 1 + missing ghost lines after this step
+      ! Communication
+      call Mpi_Irecv(V_beg(1,1,com_pos),com_size(2),MPI_REAL_WP, &
+        & neighbors(dir,-ind), 100+ind, D_comm(dir), beg_request(ind), ierr)
+      call Mpi_Send(V_coarse(1,1,1),com_size(2),MPI_REAL_WP, &
+        & neighbors(dir,ind), 100+ind, D_comm(dir), ierr)
+    end do
+    ! Last communication to complete the ghost at the beginning of the lines
+    ! We use the fact that missing ghost lines = com_pos - 1
+    com_size(2) = com_size(1)*(com_pos-1)
+    call Mpi_Irecv(V_beg(1,1,1),com_size(2),MPI_REAL_WP, &
+      & neighbors(dir,-com_nb(1)), 1, D_comm(dir), beg_request(com_nb(1)), ierr)
+    call Mpi_Send(V_coarse(1,1,N_coarse-com_pos+2),com_size(2),MPI_REAL_WP, &
+      & neighbors(dir,com_nb(1)), 1, D_comm(dir), ierr)
+  end if
+
+  if(stencil_d>0) then
+    allocate(V_end(size(V_coarse,1),size(V_coarse,2),stencil_d))
+    com_nb(2) = ceiling(real(stencil_d, WP)/N_coarse) ! number of communications required to get the ghosts
+    allocate(end_request(com_nb(2)))
+    com_pos = 1   ! Reception from the next process is done at position 1
+    com_size(2) = com_size(1)*N_coarse
+    ! Except for the last communication, send all the local coarse data.
+    ! Note that this happens only if the local coarse grid contains less than (stencil_size/2)
+    ! points along the current direction (ie if the coarse grid is very coarse)
+    do ind = 1, com_nb(2)-1
+      ! Communication
+      call Mpi_Irecv(V_end(1,1,com_pos),com_size(2),MPI_REAL_WP, &
+        & neighbors(dir,ind), 200+ind, D_comm(dir), end_request(ind), ierr)
+      call Mpi_Send(V_coarse(1,1,1),com_size(2),MPI_REAL_WP, &
+        & neighbors(dir,-ind), 200+ind, D_comm(dir), ierr)
+      ! next com_pos = (ind*N_coarse)+1 = com_pos + N_coarse
+      com_pos = com_pos + N_coarse
+    end do
+    ! Last step
+    ! Note that: missing ghost lines = stencil_d - (com_nb-1)*N_coarse
+    com_size(2) = com_size(1)*(stencil_d-((com_nb(2)-1)*N_coarse))
+    ! Perform communication
+    call Mpi_Irecv(V_end(1,1,com_pos),com_size(2),MPI_REAL_WP, &
+      & neighbors(dir,com_nb(2)), 2, D_comm(dir), end_request(com_nb(2)), ierr)
+    ! Send data
+    call Mpi_Send(V_coarse(1,1,1),com_size(2),MPI_REAL_WP, &
+      & neighbors(dir,-com_nb(2)), 2, D_comm(dir), ierr)
+  end if
+
+  ! ==== Interpolation ====
+  ! -- For middle points --
+  do ind = ind_min, ind_max
+    pos = (ind-1)*dx_f/dx_c
+    V_ind = floor(pos)+1
+    call get_weight(pos-V_ind+1, weight)
+    V_ind = V_ind - stencil_g
+    V_fine(:,ind,:) = weight(1)*V_coarse(:,:,V_ind)
+    do i = 1, (stencil_size - 1)
+      V_fine(:,ind,:) = V_fine(:,ind,:) + weight(i+1)*V_coarse(:,:,V_ind+i)
+    end do
+  end do
+
+  ! -- Wait for communication completion before dealing with the end --
+  if(stencil_g>0) then
+    call mpi_waitall(com_nb(1),beg_request, MPI_STATUSES_IGNORE, ierr)
+    deallocate(beg_request)
+  end if
+  if(stencil_d>0) then
+    call mpi_waitall(com_nb(2),end_request, MPI_STATUSES_IGNORE, ierr)
+    deallocate(end_request)
+  end if
+
+  ! -- For the beginning --
+  ! Use the fact that the interpolation formula is exact at coarse grid points
+  V_fine(:,1,:) = V_coarse(:,:,1)
+  ! For other first points
+  do ind = 2, min(ind_min-1, N_fine)  ! Be careful: in some massively parallel contexts, ind_min can be bigger than N_fine + 1
+    pos = (ind-1)*(dx_f/dx_c)
+    V_ind = floor(pos)+1
+    call get_weight(pos-V_ind+1, weight)
+    V_fine(:,ind,:) = weight(1)*V_beg(:,:,V_ind) ! V_beg array starts at index 1
+    ind_limit = stencil_g - V_ind + 1
+    do i = 2, ind_limit
+      V_fine(:,ind,:) = V_fine(:,ind,:) + weight(i)*V_beg(:,:,V_ind-1+i) ! first point in V_beg stands for 1-stencil_g position
+    end do
+    ind_limit_2 = min(stencil_size, N_coarse+ind_limit) ! for a very coarse grid, the stencil size can be bigger than N_coarse
+    do i_bis = ind_limit+1, ind_limit_2
+      V_fine(:,ind,:) = V_fine(:,ind,:) + weight(i_bis)*V_coarse(:,:,i_bis-ind_limit)
+    end do
+    do i_bis = ind_limit_2+1, stencil_size
+      V_fine(:,ind,:) = V_fine(:,ind,:) + weight(i_bis)*V_end(:,:,i_bis-ind_limit_2)
+    end do
+  end do
+  ! -- For points at the end of a line along the current direction --
+  ! If ind_max <= ind_min (ie if stencil_size > N_coarse), computations are already done for the first points
+  do ind = max(ind_max+1,ind_min), N_fine
+    pos = (ind-1)*(dx_f/dx_c)
+    V_ind = floor(pos)+1
+    call get_weight(pos-V_ind+1, weight)
+    !V_ind = V_ind - stencil_g
+    V_ind = V_ind - stencil_g -1
+    V_fine(:,ind,:) = weight(1)*V_coarse(:,:,V_ind+1)
+    ind_limit = min((stencil_size),N_coarse-V_ind)
+    do i = 2, ind_limit
+      V_fine(:,ind,:) = V_fine(:,ind,:) + weight(i)*V_coarse(:,:,V_ind+i)
+    end do
+    V_ind = V_ind - N_coarse
+    do i_bis = ind_limit+1, stencil_size
+      V_fine(:,ind,:) = V_fine(:,ind,:) + weight(i_bis)*V_end(:,:,i_bis+V_ind)
+    end do
+  end do
+
+  ! Free memory
+  if(stencil_d>0) deallocate(V_end)
+  if(stencil_g>0) deallocate(V_beg)
+
+end subroutine Inter_LastDir_Permut_com
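+
+! A worked example of the multi-message ghost exchange above (the values are
+! illustrative, not taken from the code): with stencil_g = 5 ghost lines
+! needed and only N_coarse = 2 local coarse points, com_nb(1) =
+! ceiling(5./2.) = 3 messages are required. The two full-size messages fill
+! V_beg(:,:,4:5) and V_beg(:,:,2:3); the last message carries the remaining
+! com_pos-1 = 1 line into V_beg(:,:,1).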
+
+
+!> Interpolate a field along the first direction - no communication : V_fine(i,j,k) = interpolation(V_coarse(i_interpolation,j,k))
+!!    @param[in]        V_coarse    = velocity to interpolate along the first direction
+!!    @param[in]        dx_c        = space step on the coarse grid (for first direction)
+!!    @param[in,out]    V_fine      = interpolated velocity
+!!    @param[in]        dx_f        = space step on the fine grid (for first direction)
+!! @details
+!!   V_fine and V_coarse must have the same resolution along the second and last
+!! directions.
+subroutine Inter_FirstDir_no_com(V_coarse, dx_c, V_fine, dx_f)
+
+  ! Input/Output
+  real(WP), intent(in)                      :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: V_coarse
+  real(WP), dimension(:,:,:),intent(inout)  :: V_fine
+  ! Local variable
+  real(WP), dimension(stencil_size)         :: weight       ! interpolation weight
+  integer               :: N_coarse, N_fine                 ! number of grid points
+  integer               :: i, ind, V_ind                    ! some loop indices
+  real(WP)              :: pos
+
+  V_fine = 0.0_WP  ! not strictly required (every point is overwritten below), kept as a safeguard
+
+  ! ==== Initialisation ====
+  N_coarse = size(V_coarse,1)
+  N_fine = size(V_fine,1)
+
+  ! ==== Interpolation ====
+  ! Use periodicity for boundaries
+  do ind = 1, N_fine
+    pos = (ind-1)*(dx_f/dx_c)
+    V_ind = floor(pos)+1
+    call get_weight(pos-V_ind+1, weight)
+    V_ind = V_ind - stencil_g
+    V_fine(ind,:,:) = weight(1)*V_coarse(modulo(V_ind-1,N_coarse)+1,:,:)
+    do i = 1, (stencil_size - 1)
+      V_fine(ind,:,:) = V_fine(ind,:,:) + weight(i+1)*V_coarse(modulo(V_ind+i-1,N_coarse)+1,:,:)
+    end do
+  end do
+
+end subroutine Inter_FirstDir_no_com
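+
+! A worked example of the index arithmetic above (the refinement ratio is an
+! assumption for illustration): with dx_c = 2*dx_f, the fine point ind = 4
+! gives pos = (4-1)*dx_f/dx_c = 1.5, hence V_ind = floor(1.5)+1 = 2 and
+! get_weight is called with the local abscissa pos-V_ind+1 = 0.5. The stencil
+! then spans coarse points V_ind-stencil_g .. V_ind-stencil_g+stencil_size-1,
+! wrapped periodically by modulo(.,N_coarse)+1.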
+
+
+!> Interpolate a field along the first direction and permute first and second direction - no communication : V_fine(j,i,k) = interpolation(V_coarse(i_interpolation,j,k))
+!!    @param[in]        V_coarse    = velocity to interpolate along the first direction
+!!    @param[in]        dx_c        = space step on the coarse grid (for first direction)
+!!    @param[in,out]    V_fine      = interpolated velocity
+!!    @param[in]        dx_f        = space step on the fine grid (for first direction)
+!! @details
+!!   V_fine and V_coarse must have the same resolution along the directions
+!! without interpolation.
+subroutine Inter_FirstDir_Permut_no_com(V_coarse, dx_c, V_fine, dx_f)
+
+  ! Input/Output
+  real(WP), intent(in)                      :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: V_coarse
+  real(WP), dimension(:,:,:),intent(inout)  :: V_fine
+  ! Local variable
+  real(WP), dimension(stencil_size)         :: weight       ! interpolation weight
+  integer               :: N_coarse, N_fine                 ! number of grid points
+  integer               :: i, ind, V_ind                    ! some loop indices
+  integer               :: i1, i2                           ! for permutation along the two first direction
+  real(WP)              :: pos, V_current
+
+  ! ==== Initialisation ====
+  N_coarse = size(V_coarse,1)
+  N_fine = size(V_fine,2)
+
+  ! ==== Interpolation ====
+  ! Use periodicity for boundaries
+  do ind = 1, N_fine
+    pos = (ind-1)*dx_f/dx_c
+    V_ind = floor(pos)+1
+    call get_weight(pos-V_ind+1, weight)
+    V_ind = V_ind - stencil_g
+    do i2 = 1, size(V_coarse,3)
+      do i1 = 1, size(V_coarse,2)
+        V_current = weight(1)*V_coarse(modulo(V_ind-1,N_coarse)+1,i1,i2)
+        do i = 1, (stencil_size - 1)
+          V_current = V_current + weight(i+1)*V_coarse(modulo(V_ind+i-1,N_coarse)+1,i1,i2)
+        end do
+        V_fine(i1,ind,i2) = V_current
+      end do
+    end do
+  end do
+
+end subroutine Inter_FirstDir_Permut_no_com
+
+
+!> Interpolate a field along the first direction - with communication : V_fine(i,j,k) = interpolation(V_coarse(i_interpolation,j,k))
+!! Variant with communication, where the first direction can differ from the X-axis.
+!!    @param[in]        dir         = first direction (1 = along X, 2 = along Y and 3 = along Z)
+!!    @param[in]        V_coarse    = velocity to interpolate along the first direction
+!!    @param[in]        dx_c        = space step on the coarse grid (for the first direction)
+!!    @param[in,out]    V_fine      = interpolated velocity
+!!    @param[in]        dx_f        = space step on the fine grid (for the first direction)
+!! @details
+!!   V_fine and V_coarse must have the same resolution along the second and third
+!! directions.
+subroutine Inter_FirstDir_com(dir, V_coarse, dx_c, V_fine, dx_f)
+
+  ! Input/Output
+  integer, intent(in)                       :: dir
+  real(WP), intent(in)                      :: dx_f, dx_c
+  real(WP), dimension(:,:,:),intent(in)     :: V_coarse
+  real(WP), dimension(:,:,:),intent(inout)  :: V_fine
+  ! Local variable
+  real(WP), dimension(:,:,:), allocatable   :: V_beg, V_end ! received ghost values of velocity
+  real(WP), dimension(:,:,:), allocatable   :: V_s1, V_s2   ! ghost values of velocity to send
+  real(WP), dimension(stencil_size)         :: weight       ! interpolation weight
+  integer               :: i,ind,i_bis, V_ind   ! some loop indices
+  integer               :: ind_max, ind_min, ind_limit
+  real(WP)              :: pos
+  integer               :: N_coarse, N_fine     ! number of grid points
+  integer, dimension(2) :: com_size             ! size of mpi communication for ghost points
+  integer, dimension(2) :: rece_request         ! mpi communication request (handle) of nonblocking receive
+  integer, dimension(MPI_STATUS_SIZE)         :: status  ! mpi status (for mpi_wait)
+  integer               :: ierr                 ! mpi error code
+
+  ! Initialisation
+  com_size = size(V_coarse,2)*size(V_coarse,3)
+  com_size(1) = com_size(1)*(stencil_g)
+  com_size(2) = com_size(2)*(stencil_d)
+  N_coarse = size(V_coarse,1)
+  N_fine = size(V_fine,1)
+  ind_max = ceiling((N_coarse-stencil_d)*dx_c/dx_f) - 1
+  ind_min = ceiling((stencil_g)*dx_c/dx_f)+1
+
+  ! ==== Communication ====
+  if(stencil_g>0) then
+    allocate(V_beg(stencil_g,size(V_coarse,2),size(V_coarse,3)))
+    ! Initiate non blocking receive
+    call Mpi_Irecv(V_beg(1,1,1),com_size(1),MPI_REAL_WP, &
+      & neighbors(dir,-1), 1, D_comm(dir), rece_request(1), ierr)
+    ! Send data
+    allocate(V_s1(stencil_g,size(V_coarse,2),size(V_coarse,3)))
+    V_s1 = V_coarse(N_coarse-stencil_g+1:N_coarse,:,:)
+    call Mpi_Send(V_s1(1,1,1),com_size(1),MPI_REAL_WP, &
+      & neighbors(dir,1), 1, D_comm(dir), ierr)
+  end if
+
+  if(stencil_d>0) then
+    allocate(V_end(stencil_d,size(V_coarse,2),size(V_coarse,3)))
+    ! Initiate non blocking receive
+    call Mpi_Irecv(V_end(1,1,1),com_size(2),MPI_REAL_WP, &
+      & neighbors(dir,1), 2, D_comm(dir), rece_request(2), ierr)
+    ! Send data
+    allocate(V_s2(stencil_d,size(V_coarse,2),size(V_coarse,3)))
+    V_s2 = V_coarse(1:stencil_d,:,:)
+    call Mpi_Send(V_s2(1,1,1),com_size(2),MPI_REAL_WP, &
+      & neighbors(dir,-1), 2, D_comm(dir), ierr)
+  end if
+
+  ! ==== Interpolation ====
+  ! -- For middle points --
+  do ind = ind_min, ind_max
+    pos = (ind-1)*dx_f/dx_c
+    V_ind = floor(pos)+1
+    call get_weight(pos-V_ind+1, weight)
+    V_ind = V_ind - stencil_g
+    V_fine(ind,:,:) = weight(1)*V_coarse(V_ind,:,:)
+    do i = 1, (stencil_size - 1)
+      V_fine(ind,:,:) = V_fine(ind,:,:) + weight(i+1)*V_coarse(V_ind+i,:,:)
+    end do
+  end do
+  ! -- For the beginning --
+  if(stencil_g>0) call mpi_wait(rece_request(1), status, ierr)
+  ! Use the fact that the interpolation formula is exact at coarse grid points
+  V_fine(1,:,:) = V_coarse(1,:,:)
+  ! For other first points
+  do ind = 2, ind_min-1
+    pos = (ind-1)*dx_f/dx_c
+    V_ind = floor(pos)+1
+    call get_weight(pos-V_ind+1, weight)
+    !V_ind = V_ind - stencil_g
+    V_fine(ind,:,:) = weight(1)*V_beg(V_ind,:,:) ! V_beg array starts at index 1
+    ind_limit = stencil_g - V_ind + 1
+    do i = 2, ind_limit
+      V_fine(ind,:,:) = V_fine(ind,:,:) + weight(i)*V_beg(V_ind-1+i,:,:) ! first point in V_beg stands for 1-stencil_g position
+    end do
+    do i_bis = ind_limit+1, stencil_size
+      V_fine(ind,:,:) = V_fine(ind,:,:) + weight(i_bis)*V_coarse(i_bis-ind_limit,:,:)
+    end do
+  end do
+  ! -- For points at the end of a line along the current direction --
+  if(stencil_d>0) call mpi_wait(rece_request(2), status, ierr)
+  do ind = ind_max+1, N_fine
+    pos = (ind-1)*dx_f/dx_c
+    V_ind = floor(pos)+1
+    call get_weight(pos-V_ind+1, weight)
+    !V_ind = V_ind - stencil_g
+    V_ind = V_ind - stencil_g-1
+    V_fine(ind,:,:) = weight(1)*V_coarse(V_ind+1,:,:)
+    ind_limit = min((stencil_size),N_coarse-V_ind)
+    do i = 2, ind_limit
+      V_fine(ind,:,:) = V_fine(ind,:,:) + weight(i)*V_coarse(V_ind+i,:,:)
+    end do
+    V_ind = V_ind - N_coarse
+    do i_bis = ind_limit+1, stencil_size
+      V_fine(ind,:,:) = V_fine(ind,:,:) + weight(i_bis)*V_end(i_bis+V_ind,:,:)
+    end do
+  end do
+
+  ! Free memory
+  if(stencil_d>0) deallocate(V_end)
+  if(stencil_d>0) deallocate(V_s2)
+  if(stencil_g>0) deallocate(V_beg)
+  if(stencil_g>0) deallocate(V_s1)
+
+end subroutine Inter_FirstDir_com
+
+
+!> Interpolation weights for the M'4 (Mprime4) kernel.
+subroutine weight_Mprime4(pos,weight)
+
+  real(WP), intent(in)                :: pos
+  real(WP), dimension(:), intent(out) :: weight
+
+  !weight(1)  = ((2.-(pos+1.))**2 * (1.-(pos+1.)))/2.
+  weight(1) = (pos * (pos * (-pos + 2.) - 1.)) / 2.
+  !weight(3) = 1.-2.5*(1.-pos)**2 + 1.5*(1.-pos)**3
+  weight(3) = (pos * (pos * (-3. * pos + 4.) + 1.)) / 2.
+  !weight(4) = ((2.-(2.-pos))**2 * (1.-(2.-pos)))/2.
+  weight(4) = (pos * pos * (pos - 1.)) / 2.
+  !weight(2) = 1.- 2.5*pos**2 + 1.5*pos**3
+  weight(2) = 1. - (weight(1)+weight(3)+weight(4))
+
+
+end subroutine weight_Mprime4
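+
+!> Minimal self-check sketch (ours, not part of the original solver): every
+!! kernel in this module must satisfy the partition of unity, sum(weight) = 1,
+!! which the closing weight of each subroutine enforces by construction. The
+!! subroutine name and the tolerance below are illustrative assumptions.
+subroutine check_Mprime4_partition(pos)
+
+  real(WP), intent(in)     :: pos
+  real(WP), dimension(4)   :: weight   ! M'4 uses a 4-point stencil
+
+  call weight_Mprime4(pos, weight)
+  if (abs(sum(weight) - 1._WP) > 1e-12_WP) then
+    print *, 'Mprime4: partition of unity violated at pos = ', pos
+  end if
+
+end subroutine check_Mprime4_partition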
+
+
+!> Interpolation with the M4 kernel. Order 2 everywhere?
+subroutine weight_M4(pos,weight)
+
+  real(WP), intent(in)              :: pos
+  real(WP), dimension(:), intent(out) :: weight
+
+  ! kernel (with X = abs(distance)):
+  !   (1._WP/6._WP)*((-X+2)**3)                               if 1 <= X < 2
+  !   (1._WP/6._WP)*((-X+2)**3) - (4._WP/6._WP)*((-X+1)**3)   if X < 1
+
+  !weight(1) = (1._WP/6._WP)*((-(pos+1)+2)**3)
+  weight(1) = (1._WP/6._WP)*((-pos+1._WP)**3)
+  !weight(2) = (1._WP/6._WP)*((-pos+2)**3) - (4._WP/6._WP)*((-pos+1)**3)
+  !weight(3) = (1._WP/6._WP)*((-(1-pos)+2)**3) - (4._WP/6._WP)*((-(1-pos)+1)**3)
+  weight(3) = (1._WP/6._WP)*((pos+1)**3) - (4._WP/6._WP)*(pos**3)
+  weight(4) = (1._WP/6._WP)*(pos**3)
+  weight(2) = 1. - (weight(1)+weight(3)+weight(4))
+
+
+end subroutine weight_M4
+
+
+!> Interpolation with Lambda(4,4) kernel. Order 4 everywhere.
+subroutine weight_Lambda4_4(pos,weight)
+
+  real(WP), intent(in)              :: pos
+  real(WP), dimension(:), intent(out) :: weight
+
+    weight(1) = (pos*(pos*(pos*(pos*(pos*(pos*(pos*(pos*(-46. * pos + 207.) - 354.) + 273.) - 80.) + 1.) - 2.)- 1.) + 2.)) / 24.
+    weight(2) = (pos*(pos*(pos*(pos*(pos*(pos*(pos*(pos*(230. * pos - 1035.) +1770.) - 1365.) + 400.) - 4.) + 4.) + 16.) - 16.)) / 24.
+    weight(3) = (pos* pos*(pos*pos* (pos*(pos*(pos*(pos*(-460.* pos + 2070.) - 3540.) + 2730.) - 800.) + 6.) - 30.)+ 24.) / 24.
+    weight(4) = (pos*(pos*(pos*(pos*(pos*(pos*(pos*(pos*(460. * pos - 2070.) + 3540.) - 2730.) + 800.) - 4.) - 4.) + 16.) + 16.)) / 24.
+    !weight(5) = (pos*(pos*(pos*(pos*(pos*(pos*(pos*(pos * (-230. * pos + 1035.) - 1770.) + 1365.) - 400.) + 1.) + 2.) - 1.) - 2.)) / 24.
+    weight(6) = (pos*pos*pos*pos*pos*(pos*(pos * (pos * (46. * pos - 207.) + 354.) - 273.) + 80.)) / 24.
+    weight(5) = 1. - (weight(1)+weight(2)+weight(3)+weight(4)+weight(6))
+
+
+end subroutine weight_Lambda4_4
+
+
+!> Basic linear interpolation formula. Be careful: this kernel may create unphysical
+!! oscillations at low frequencies. It is implemented mainly to illustrate why "better"
+!! interpolation kernels are required.
+subroutine weight_linear(pos,weight)
+
+  real(WP), intent(in)              :: pos
+  real(WP), dimension(:), intent(out) :: weight
+
+    weight(1) = 1-pos
+    weight(2) = pos
+
+end subroutine weight_linear
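+
+!> Usage sketch (ours, illustration only): with the linear kernel, interpolating
+!! between two coarse values v0 and v1 at local abscissa pos recovers the
+!! classical formula (1-pos)*v0 + pos*v1.
+subroutine demo_weight_linear(v0, v1, pos, res)
+
+  real(WP), intent(in)   :: v0, v1, pos
+  real(WP), intent(out)  :: res
+  real(WP), dimension(2) :: weight
+
+  call weight_linear(pos, weight)
+  res = weight(1)*v0 + weight(2)*v1
+
+end subroutine demo_weight_linear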
+
+end module Interpolation_velo
+!> @}
diff --git a/HySoP/src/scalesReduced/precision.conf.in b/HySoP/src/scalesReduced/precision.conf.in
new file mode 100644
index 0000000000000000000000000000000000000000..2a0a3c85aad555608330e127114e1f09a2375947
--- /dev/null
+++ b/HySoP/src/scalesReduced/precision.conf.in
@@ -0,0 +1,19 @@
+!> Select float precision for the whole code.
+!! This is a generated file, do not edit.
+!! Usage:
+!! cmake -DPRECISION=value ...
+!! with value = simple or value = double
+module precision
+
+  use mpi, only: MPI_DOUBLE_PRECISION, MPI_FLOAT
+  implicit none
+  !> Floats precision
+  integer, parameter  :: SP = kind(1.0)
+  integer, parameter  :: DP = kind(1.0d0)
+  !> Chosen precision (set during config. using -DPRECISION=... with cmake)
+  integer, parameter  :: WP = @FLOAT_WORKING_PRECISION@
+  !> MPI type for float
+  integer, parameter     :: MPI_REAL_WP = @MPI_FLOAT_WORKING_PRECISION@
+  !> the string size
+  integer, parameter  :: str_short  = 8
+end module precision
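+
+! Hedged usage sketch (comment only, not part of the generated module): code
+! built on this module declares reals with the configured kind and passes the
+! matching MPI datatype, so a single cmake option switches the precision of
+! the whole build, e.g.
+!   use precision, only: WP, MPI_REAL_WP
+!   real(WP) :: x
+!   call MPI_Send(x, 1, MPI_REAL_WP, dest, tag, comm, ierr)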
diff --git a/HySoP/src/scalesReduced/precision_tools.f90 b/HySoP/src/scalesReduced/precision_tools.f90
new file mode 100644
index 0000000000000000000000000000000000000000..5dacf3f39db303057276819266899d573633c1b9
--- /dev/null
+++ b/HySoP/src/scalesReduced/precision_tools.f90
@@ -0,0 +1,43 @@
+!USEFORTEST toolbox
+!USEFORTEST postprocess
+!USEFORTEST advec
+!USEFORTEST io
+!USEFORTEST topo
+!USEFORTEST avgcond
+!USEFORTEST interpolation
+!> @addtogroup toolbox
+!! @{
+!------------------------------------------------------------------------------
+!
+! MODULE: precision_tools
+!
+!> @author
+!> Guillaume Balarac, LEGI
+!
+! DESCRIPTION:
+!> The aim of this module is to set some parameters fixing the working data
+!> representation in the code. It is set to double precision for REAL.
+!------------------------------------------------------------------------------
+
+MODULE precision_tools
+  use mpi, only: MPI_DOUBLE_PRECISION
+  implicit none
+
+  !> Floats precision
+  INTEGER, PARAMETER  :: SP = kind(1.0)
+  INTEGER, PARAMETER  :: DP = kind(1.0d0)
+  INTEGER, PARAMETER  :: WP = DP
+  !> the MPI type for REAL exchanges in simple or double precision
+  INTEGER, parameter     :: MPI_REAL_WP = MPI_DOUBLE_PRECISION
+  REAL(WP), PRIVATE   :: sample_real_at_WP
+  REAL(WP), PARAMETER :: MAX_REAL_WP = HUGE(sample_real_at_WP)
+  INTEGER, PRIVATE    :: sample_int
+  INTEGER, PARAMETER  :: MAX_INTEGER = HUGE(sample_int)
+  INTEGER, PARAMETER  :: DI = selected_int_kind(r=12)
+  !> the string size
+  INTEGER, PARAMETER  :: str_short  = 8
+  INTEGER, PARAMETER  :: str_medium = 64
+  INTEGER, PARAMETER  :: str_long   = 4096
+
+END MODULE precision_tools
+!> @}
diff --git a/HySoP/src/scalesReduced/structure_tools.f90 b/HySoP/src/scalesReduced/structure_tools.f90
new file mode 100644
index 0000000000000000000000000000000000000000..43de180b3967b66cacf1aa62edfdfdc4d240c38d
--- /dev/null
+++ b/HySoP/src/scalesReduced/structure_tools.f90
@@ -0,0 +1,34 @@
+!------------------------------------------------------------------------------
+!
+! MODULE: structure_tools
+!
+!
+! DESCRIPTION:
+!> This module provides some useful structures, such as arrays of pointers (basic
+!> Fortran only defines pointers to arrays) and arrays of pointers to arrays.
+!
+!> @author
+!! Jean-Baptiste Lagaert, LEGI
+!
+!------------------------------------------------------------------------------
+
+module structure_tools
+
+  use precision_tools
+
+  implicit none
+
+  ! --- In order to create an array of pointer to real ---
+  type real_pter
+      real(WP), pointer                   :: pter
+  end type real_pter
+  ! --- In order to create an array of pointer to array ---
+  type int_1D_pter
+      integer, dimension(:), pointer      :: pter
+  end type int_1D_pter
+  type real_1D_pter
+      real(WP), dimension(:), pointer     :: pter
+  end type real_1D_pter
+  ! ---------------------------------------------
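+
+  ! Hedged usage sketch (comment only): an array of pointers to 1D arrays can
+  ! then be declared and filled as, e.g.,
+  !   type(real_1D_pter), dimension(:), allocatable :: lines
+  !   allocate(lines(nb_lines))
+  !   lines(1)%pter => some_real_array
+  ! where nb_lines and some_real_array are illustrative names.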
+
+end module structure_tools
diff --git a/HySoP/src/tests/F2003/testAllocatedPtr.cxx b/HySoP/src/tests/F2003/testAllocatedPtr.cxx
new file mode 100644
index 0000000000000000000000000000000000000000..40f329a18cdc506df865c760b4c9c74edbf67659
--- /dev/null
+++ b/HySoP/src/tests/F2003/testAllocatedPtr.cxx
@@ -0,0 +1,57 @@
+/** \file testAllocatedPtr.cxx 
+    Test F2003 C interoperability. 
+    Send a vector (i.e. an allocated pointer) to Fortran. It must come back still properly allocated, with different content.
+
+ */
+#include <iostream>
+#include <string>
+#include "WrapC.hpp"
+#include "ParmesDef.hpp"
+#include <vector>
+#include <math.h>
+
+using namespace std ;
+
+using Parmes::Def::real_t;
+
+// Declare the Fortran subroutine
+extern "C"  void wrapC2F_allocatedPtr(double*, int*, real_t*);
+
+
+int main(int argc, char* argv[])
+{
+
+  int length = 12; 
+  std::vector<real_t> myVector(length);
+  for(int i = 0; i<length; ++i)
+    myVector[i] = 2.3;
+  
+  real_t expectedContent = 1.1; 
+  
+  // Send myVector to the Fortran wrapper.
+  // myVector is expected to be modified such that myVector[i] = expectedContent*(i+1)
+  wrapC2F_allocatedPtr(&myVector[0], &length, &expectedContent);
+  
+  for(int i = 0; i<length ; ++i)
+    cout << myVector[i] << endl;
+
+  if(static_cast<int>(myVector.size()) != length)
+    {
+      cout << "ERROR" << endl;
+      return 1;
+    }
+  
+  
+  for(int i = 0; i< length ; ++i)
+    {
+      real_t check = myVector[i] - expectedContent*(i+1);
+      real_t tol = 1e-10;
+      if(fabs(check)>tol)
+	{
+	  cout << "ERROR" << endl;
+	  return 1;
+	}
+    }
+
+  return 0;
+}
+
diff --git a/HySoP/src/tests/F2003/testNullPtr.cxx b/HySoP/src/tests/F2003/testNullPtr.cxx
new file mode 100644
index 0000000000000000000000000000000000000000..d552d8912080203aed9b04868ae3ae72c12b2ced
--- /dev/null
+++ b/HySoP/src/tests/F2003/testNullPtr.cxx
@@ -0,0 +1,43 @@
+/** \file testNullPtr.cxx 
+    Test F2003 C interoperability 
+
+    Send a null pointer to Fortran: it must come back as a properly allocated vector.
+    
+ */
+#include<iostream>
+#include <string>
+#include "WrapC.hpp"
+
+using namespace std ;
+
+extern "C" {
+
+  void wrapC2F_NULLPtr(C2FPtr*);
+  void wrapC2F_NULLPtrBis(double**, int*);
+ 
+}
+
+int main(int argc, char* argv[])
+{
+  double * toto = 0;
+  int ntoto;
+  std::cout << "Fortran 2003 wrapper ..." << std::endl;
+  
+  // Allocate the C2FPtr struct that the Fortran wrapper will fill
+  C2FPtr * myVector = new C2FPtr;
+  
+  wrapC2F_NULLPtr(myVector);
+  
+  for(int i =0; i<myVector->length; ++i)
+    cout << myVector->elements[i] << endl;
+  
+  wrapC2F_NULLPtrBis(&toto, &ntoto);
+  
+  cout << ntoto << endl;
+  for(int i = 0; i<ntoto ;++i)
+    cout << toto[i] << endl;
+  
+  delete myVector;
+
+  return 0;
+}
+
diff --git a/HySoP/src/tests/F2003/userMod.f90 b/HySoP/src/tests/F2003/userMod.f90
new file mode 100644
index 0000000000000000000000000000000000000000..25870aa503fcd1ac4b1c80ab12f9f1f918818ff9
--- /dev/null
+++ b/HySoP/src/tests/F2003/userMod.f90
@@ -0,0 +1,66 @@
+module userMod
+
+  implicit none
+  
+contains
+
+  ! Already allocated pointer, no size information, intent(inout)
+  subroutine modifyX(x,factor)
+    
+    real(kind=8), dimension(:),  intent(inout) :: x
+    real(kind=8), intent(in) :: factor
+    
+    integer :: i
+    do i=1,size(x)
+       x(i) = i*factor
+    end do
+    
+  end subroutine modifyX
+
+  subroutine cas4(x)
+
+    real(kind=8), dimension(:),  intent(inout) :: x
+
+
+    print *, 'cas4a', x(1), ' ', x(2)
+
+    x(2) = x(2) + 1.65
+    print *, 'cas4b', x(1), ' ', x(2)
+
+  end subroutine cas4
+
+  subroutine cas5(x)
+
+    !!   integer, intent(in) :: size
+    real(kind=8), pointer, dimension(:) :: x
+
+    allocate(x(2))
+    x(1) = 12
+    x(2) = 8
+    print *, 'cas5a', x(1), ' ', x(2)
+
+    x(2) = 1.65
+    print *, 'cas5b', x(1), ' ', x(2)
+    print * , x
+  end subroutine cas5
+
+  subroutine cas6(x)
+
+    !!   integer, intent(in) :: size
+    real(kind=8), pointer, dimension(:) :: x
+
+    x(1) = 12
+    x(2) = 8
+    print *, 'cas6a', shape(x), ' ', x(1), ' ', x(2)
+
+    x(2) = 1.65
+    print *, 'cas6b', x(1), ' ', x(2)
+    print * , x
+
+  end subroutine cas6
+
+  subroutine Application3()
+
+  end subroutine Application3
+
+end module userMod
diff --git a/HySoP/src/tests/F2003/wrapper.f90 b/HySoP/src/tests/F2003/wrapper.f90
new file mode 100644
index 0000000000000000000000000000000000000000..ef2db12eea93872301067b7e19ffa103cd149cc9
--- /dev/null
+++ b/HySoP/src/tests/F2003/wrapper.f90
@@ -0,0 +1,86 @@
+module testWrap
+
+  ! The fortran wrapper from Parmes
+  use WrapFort
+  ! Some subroutines for tests
+  use userMod
+
+  implicit none
+  
+contains
+
+  !> Send an already allocated C pointer to a Fortran subroutine.
+  !! @param type(c_Ptr) a C pointer (void*)
+  !! @param type(c_int) size of the C pointer target
+  subroutine wrapC2F_allocatedPtr(cptr, sizeCptr, expectedContent) bind(C, name='wrapC2F_allocatedPtr')
+
+    type(c_Ptr),intent(in),VALUE :: cptr
+    integer (kind=c_int), intent(IN) :: sizeCptr
+    real(kind = c_double), intent(IN) :: expectedContent
+
+    real(kind=c_double), pointer, dimension(:) :: xp => NULL()
+
+    if(NDEBUG) print *, '=== wrapC2F_allocatedPtr ===' 
+    ! Associate cptr and xp. 
+    call c_f_pointer (cptr, xp, (/sizeCPtr/))
+
+    if(.not.associated(xp) ) then
+       print *, 'Error, association failed'
+    end if
+
+    ! Do some stuff on xp ...
+    xp(1) = -3.9
+    call modifyX(xp,expectedContent)
+    print *, '=== End wrapC2F_allocatedPtr === '
+
+  end subroutine wrapC2F_allocatedPtr
+  
+  !> Send a NULL C pointer to a Fortran subroutine and get it back properly allocated 
+  !! @param[in,out] vector a C2FPtr struct, filled on return with the length and the element pointer
+  subroutine wrapC2F_NULLPtr(vector) bind(C, name='wrapC2F_NULLPtr')
+
+    type(C2FPtr) :: vector
+
+    real(kind=c_double), pointer, dimension(:) :: xp => NULL()
+
+    if(NDEBUG) print *, '=== wrapC2F_NULLPtr ===' 
+
+    call cas5(xp)
+
+    if(.not.associated(xp) ) then
+       print *, 'Error, association failed'
+    end if
+
+    vector%length = size(xp)
+    vector%elements = c_loc(xp(1))
+
+    print *, '=== End of wrapC2F_NULLPtr === '
+  end subroutine wrapC2F_NULLPtr
+
+  !> Send a NULL C pointer to a Fortran subroutine and get it back properly allocated 
+  !! @param[in,out] vector a C pointer (void**), associated on return
+  !! @param[out] length size of the returned vector
+  subroutine wrapC2F_NULLPtrBis(vector, length) bind(C, name='wrapC2F_NULLPtrBis')
+
+    type(c_ptr),intent(inout) :: vector
+    integer(c_int), intent(out) :: length
+
+    real(kind=c_double), pointer, dimension(:) :: xp => NULL()
+
+    if(NDEBUG) print *, '=== wrapC2F_NULLPtrBis ===' 
+
+    call cas5(xp)
+
+!!$    if(.not.associated(xp) ) then
+!!$       print *, 'Error, association failed'
+!!$    end if
+!!$    length = size(xp)
+!!$    vector= c_loc(xp(1))
+
+    call aliasF2C(vector, xp, length)
+    
+    print *, '=== End of wrapC2F_NULLPtrBis === '
+  end subroutine wrapC2F_NULLPtrBis
+
+end module testWrap